1//===- SelectionDAGBuilder.cpp - Selection-DAG building -------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This implements routines for translating from LLVM IR into SelectionDAG IR.
10//
11//===----------------------------------------------------------------------===//
12
13#include "SelectionDAGBuilder.h"
14#include "SDNodeDbgValue.h"
15#include "llvm/ADT/APFloat.h"
16#include "llvm/ADT/APInt.h"
17#include "llvm/ADT/BitVector.h"
18#include "llvm/ADT/None.h"
19#include "llvm/ADT/Optional.h"
20#include "llvm/ADT/STLExtras.h"
21#include "llvm/ADT/SmallPtrSet.h"
22#include "llvm/ADT/SmallSet.h"
23#include "llvm/ADT/StringRef.h"
24#include "llvm/ADT/Triple.h"
25#include "llvm/ADT/Twine.h"
26#include "llvm/Analysis/AliasAnalysis.h"
27#include "llvm/Analysis/BlockFrequencyInfo.h"
28#include "llvm/Analysis/BranchProbabilityInfo.h"
29#include "llvm/Analysis/ConstantFolding.h"
30#include "llvm/Analysis/EHPersonalities.h"
31#include "llvm/Analysis/Loads.h"
32#include "llvm/Analysis/MemoryLocation.h"
33#include "llvm/Analysis/ProfileSummaryInfo.h"
34#include "llvm/Analysis/TargetLibraryInfo.h"
35#include "llvm/Analysis/ValueTracking.h"
36#include "llvm/Analysis/VectorUtils.h"
37#include "llvm/CodeGen/Analysis.h"
38#include "llvm/CodeGen/FunctionLoweringInfo.h"
39#include "llvm/CodeGen/GCMetadata.h"
40#include "llvm/CodeGen/MachineBasicBlock.h"
41#include "llvm/CodeGen/MachineFrameInfo.h"
42#include "llvm/CodeGen/MachineFunction.h"
43#include "llvm/CodeGen/MachineInstr.h"
44#include "llvm/CodeGen/MachineInstrBuilder.h"
45#include "llvm/CodeGen/MachineJumpTableInfo.h"
46#include "llvm/CodeGen/MachineMemOperand.h"
47#include "llvm/CodeGen/MachineModuleInfo.h"
48#include "llvm/CodeGen/MachineOperand.h"
49#include "llvm/CodeGen/MachineRegisterInfo.h"
50#include "llvm/CodeGen/RuntimeLibcalls.h"
51#include "llvm/CodeGen/SelectionDAG.h"
52#include "llvm/CodeGen/SelectionDAGTargetInfo.h"
53#include "llvm/CodeGen/StackMaps.h"
54#include "llvm/CodeGen/SwiftErrorValueTracking.h"
55#include "llvm/CodeGen/TargetFrameLowering.h"
56#include "llvm/CodeGen/TargetInstrInfo.h"
57#include "llvm/CodeGen/TargetOpcodes.h"
58#include "llvm/CodeGen/TargetRegisterInfo.h"
59#include "llvm/CodeGen/TargetSubtargetInfo.h"
60#include "llvm/CodeGen/WinEHFuncInfo.h"
61#include "llvm/IR/Argument.h"
62#include "llvm/IR/Attributes.h"
63#include "llvm/IR/BasicBlock.h"
64#include "llvm/IR/CFG.h"
65#include "llvm/IR/CallingConv.h"
66#include "llvm/IR/Constant.h"
67#include "llvm/IR/ConstantRange.h"
68#include "llvm/IR/Constants.h"
69#include "llvm/IR/DataLayout.h"
70#include "llvm/IR/DebugInfoMetadata.h"
71#include "llvm/IR/DerivedTypes.h"
72#include "llvm/IR/Function.h"
73#include "llvm/IR/GetElementPtrTypeIterator.h"
74#include "llvm/IR/InlineAsm.h"
75#include "llvm/IR/InstrTypes.h"
76#include "llvm/IR/Instructions.h"
77#include "llvm/IR/IntrinsicInst.h"
78#include "llvm/IR/Intrinsics.h"
79#include "llvm/IR/IntrinsicsAArch64.h"
80#include "llvm/IR/IntrinsicsWebAssembly.h"
81#include "llvm/IR/LLVMContext.h"
82#include "llvm/IR/Metadata.h"
83#include "llvm/IR/Module.h"
84#include "llvm/IR/Operator.h"
85#include "llvm/IR/PatternMatch.h"
86#include "llvm/IR/Statepoint.h"
87#include "llvm/IR/Type.h"
88#include "llvm/IR/User.h"
89#include "llvm/IR/Value.h"
90#include "llvm/MC/MCContext.h"
91#include "llvm/MC/MCSymbol.h"
92#include "llvm/Support/AtomicOrdering.h"
93#include "llvm/Support/Casting.h"
94#include "llvm/Support/CommandLine.h"
95#include "llvm/Support/Compiler.h"
96#include "llvm/Support/Debug.h"
97#include "llvm/Support/MathExtras.h"
98#include "llvm/Support/raw_ostream.h"
99#include "llvm/Target/TargetIntrinsicInfo.h"
100#include "llvm/Target/TargetMachine.h"
101#include "llvm/Target/TargetOptions.h"
102#include "llvm/Transforms/Utils/Local.h"
103#include <cstddef>
104#include <cstring>
105#include <iterator>
106#include <limits>
107#include <numeric>
108#include <tuple>
109
110using namespace llvm;
111using namespace PatternMatch;
112using namespace SwitchCG;
113
114#define DEBUG_TYPE "isel"
115
116/// LimitFloatPrecision - Generate low-precision inline sequences for
117/// some float libcalls (6, 8 or 12 bits).
118static unsigned LimitFloatPrecision;
119
120static cl::opt<bool>
121 InsertAssertAlign("insert-assert-align", cl::init(true),
122 cl::desc("Insert the experimental `assertalign` node."),
123 cl::ReallyHidden);
124
125static cl::opt<unsigned, true>
126 LimitFPPrecision("limit-float-precision",
127 cl::desc("Generate low-precision inline sequences "
128 "for some float libcalls"),
129 cl::location(LimitFloatPrecision), cl::Hidden,
130 cl::init(0));
131
132static cl::opt<unsigned> SwitchPeelThreshold(
133 "switch-peel-threshold", cl::Hidden, cl::init(66),
134 cl::desc("Set the case probability threshold for peeling the case from a "
135 "switch statement. A value greater than 100 will void this "
136 "optimization"));
137
138// Limit the width of DAG chains. This is important in general to prevent
139// DAG-based analysis from blowing up. For example, alias analysis and
140// load clustering may not complete in reasonable time. It is difficult to
141// recognize and avoid this situation within each individual analysis, and
142// future analyses are likely to have the same behavior. Limiting DAG width is
143// the safe approach and will be especially important with global DAGs.
144//
145// MaxParallelChains default is arbitrarily high to avoid affecting
146// optimization, but could be lowered to improve compile time. Any ld-ld-st-st
147// sequence over this should have been converted to llvm.memcpy by the
148// frontend. It is easy to induce this behavior with .ll code such as:
149// %buffer = alloca [4096 x i8]
150// %data = load [4096 x i8]* %argPtr
151// store [4096 x i8] %data, [4096 x i8]* %buffer
152static const unsigned MaxParallelChains = 64;
153
154static SDValue getCopyFromPartsVector(SelectionDAG &DAG, const SDLoc &DL,
155 const SDValue *Parts, unsigned NumParts,
156 MVT PartVT, EVT ValueVT, const Value *V,
157 Optional<CallingConv::ID> CC);
158
159/// getCopyFromParts - Create a value that contains the specified legal parts
160/// combined into the value they represent. If the parts combine to a type
161/// larger than ValueVT then AssertOp can be used to specify whether the extra
162/// bits are known to be zero (ISD::AssertZext) or sign extended from ValueVT
163/// (ISD::AssertSext).
164static SDValue getCopyFromParts(SelectionDAG &DAG, const SDLoc &DL,
165 const SDValue *Parts, unsigned NumParts,
166 MVT PartVT, EVT ValueVT, const Value *V,
167 Optional<CallingConv::ID> CC = None,
168 Optional<ISD::NodeType> AssertOp = None) {
169 // Let the target assemble the parts if it wants to
170 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
171 if (SDValue Val = TLI.joinRegisterPartsIntoValue(DAG, DL, Parts, NumParts,
172 PartVT, ValueVT, CC))
173 return Val;
174
175 if (ValueVT.isVector())
176 return getCopyFromPartsVector(DAG, DL, Parts, NumParts, PartVT, ValueVT, V,
177 CC);
178
179 assert(NumParts > 0 && "No parts to assemble!");
180 SDValue Val = Parts[0];
181
182 if (NumParts > 1) {
183 // Assemble the value from multiple parts.
184 if (ValueVT.isInteger()) {
185 unsigned PartBits = PartVT.getSizeInBits();
186 unsigned ValueBits = ValueVT.getSizeInBits();
187
188 // Assemble the power of 2 part.
189 unsigned RoundParts =
190 (NumParts & (NumParts - 1)) ? 1 << Log2_32(NumParts) : NumParts;
191 unsigned RoundBits = PartBits * RoundParts;
192 EVT RoundVT = RoundBits == ValueBits ?
193 ValueVT : EVT::getIntegerVT(*DAG.getContext(), RoundBits);
194 SDValue Lo, Hi;
195
196 EVT HalfVT = EVT::getIntegerVT(*DAG.getContext(), RoundBits/2);
197
198 if (RoundParts > 2) {
199 Lo = getCopyFromParts(DAG, DL, Parts, RoundParts / 2,
200 PartVT, HalfVT, V);
201 Hi = getCopyFromParts(DAG, DL, Parts + RoundParts / 2,
202 RoundParts / 2, PartVT, HalfVT, V);
203 } else {
204 Lo = DAG.getNode(ISD::BITCAST, DL, HalfVT, Parts[0]);
205 Hi = DAG.getNode(ISD::BITCAST, DL, HalfVT, Parts[1]);
206 }
207
208 if (DAG.getDataLayout().isBigEndian())
209 std::swap(Lo, Hi);
210
211 Val = DAG.getNode(ISD::BUILD_PAIR, DL, RoundVT, Lo, Hi);
212
213 if (RoundParts < NumParts) {
214 // Assemble the trailing non-power-of-2 part.
215 unsigned OddParts = NumParts - RoundParts;
216 EVT OddVT = EVT::getIntegerVT(*DAG.getContext(), OddParts * PartBits);
217 Hi = getCopyFromParts(DAG, DL, Parts + RoundParts, OddParts, PartVT,
218 OddVT, V, CC);
219
220 // Combine the round and odd parts.
221 Lo = Val;
222 if (DAG.getDataLayout().isBigEndian())
223 std::swap(Lo, Hi);
224 EVT TotalVT = EVT::getIntegerVT(*DAG.getContext(), NumParts * PartBits);
225 Hi = DAG.getNode(ISD::ANY_EXTEND, DL, TotalVT, Hi);
226 Hi =
227 DAG.getNode(ISD::SHL, DL, TotalVT, Hi,
228 DAG.getConstant(Lo.getValueSizeInBits(), DL,
229 TLI.getPointerTy(DAG.getDataLayout())));
230 Lo = DAG.getNode(ISD::ZERO_EXTEND, DL, TotalVT, Lo);
231 Val = DAG.getNode(ISD::OR, DL, TotalVT, Lo, Hi);
232 }
233 } else if (PartVT.isFloatingPoint()) {
234 // FP split into multiple FP parts (for ppcf128)
235 assert(ValueVT == EVT(MVT::ppcf128) && PartVT == MVT::f64 &&
236 "Unexpected split");
237 SDValue Lo, Hi;
238 Lo = DAG.getNode(ISD::BITCAST, DL, EVT(MVT::f64), Parts[0]);
239 Hi = DAG.getNode(ISD::BITCAST, DL, EVT(MVT::f64), Parts[1]);
240 if (TLI.hasBigEndianPartOrdering(ValueVT, DAG.getDataLayout()))
241 std::swap(Lo, Hi);
242 Val = DAG.getNode(ISD::BUILD_PAIR, DL, ValueVT, Lo, Hi);
243 } else {
244 // FP split into integer parts (soft fp)
245 assert(ValueVT.isFloatingPoint() && PartVT.isInteger() &&
246 !PartVT.isVector() && "Unexpected split");
247 EVT IntVT = EVT::getIntegerVT(*DAG.getContext(), ValueVT.getSizeInBits());
248 Val = getCopyFromParts(DAG, DL, Parts, NumParts, PartVT, IntVT, V, CC);
249 }
250 }
251
252 // There is now one part, held in Val. Correct it to match ValueVT.
253 // PartEVT is the type of the register class that holds the value.
254 // ValueVT is the type of the inline asm operation.
255 EVT PartEVT = Val.getValueType();
256
257 if (PartEVT == ValueVT)
258 return Val;
259
260 if (PartEVT.isInteger() && ValueVT.isFloatingPoint() &&
261 ValueVT.bitsLT(PartEVT)) {
262 // For an FP value in an integer part, we need to truncate to the right
263 // width first.
264 PartEVT = EVT::getIntegerVT(*DAG.getContext(), ValueVT.getSizeInBits());
265 Val = DAG.getNode(ISD::TRUNCATE, DL, PartEVT, Val);
266 }
267
268 // Handle types that have the same size.
269 if (PartEVT.getSizeInBits() == ValueVT.getSizeInBits())
270 return DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);
271
272 // Handle types with different sizes.
273 if (PartEVT.isInteger() && ValueVT.isInteger()) {
274 if (ValueVT.bitsLT(PartEVT)) {
275 // For a truncate, see if we have any information to
276 // indicate whether the truncated bits will always be
277 // zero or sign-extension.
278 if (AssertOp.hasValue())
279 Val = DAG.getNode(*AssertOp, DL, PartEVT, Val,
280 DAG.getValueType(ValueVT));
281 return DAG.getNode(ISD::TRUNCATE, DL, ValueVT, Val);
282 }
283 return DAG.getNode(ISD::ANY_EXTEND, DL, ValueVT, Val);
284 }
285
286 if (PartEVT.isFloatingPoint() && ValueVT.isFloatingPoint()) {
287 // FP_ROUND's are always exact here.
288 if (ValueVT.bitsLT(Val.getValueType()))
289 return DAG.getNode(
290 ISD::FP_ROUND, DL, ValueVT, Val,
291 DAG.getTargetConstant(1, DL, TLI.getPointerTy(DAG.getDataLayout())));
292
293 return DAG.getNode(ISD::FP_EXTEND, DL, ValueVT, Val);
294 }
295
296 // Handle MMX to a narrower integer type by bitcasting MMX to integer and
297 // then truncating.
298 if (PartEVT == MVT::x86mmx && ValueVT.isInteger() &&
299 ValueVT.bitsLT(PartEVT)) {
300 Val = DAG.getNode(ISD::BITCAST, DL, MVT::i64, Val);
301 return DAG.getNode(ISD::TRUNCATE, DL, ValueVT, Val);
302 }
303
304 report_fatal_error("Unknown mismatch in getCopyFromParts!");
305}
306
307static void diagnosePossiblyInvalidConstraint(LLVMContext &Ctx, const Value *V,
308 const Twine &ErrMsg) {
309 const Instruction *I = dyn_cast_or_null<Instruction>(V);
310 if (!V)
311 return Ctx.emitError(ErrMsg);
312
313 const char *AsmError = ", possible invalid constraint for vector type";
314 if (const CallInst *CI = dyn_cast<CallInst>(I))
315 if (CI->isInlineAsm())
316 return Ctx.emitError(I, ErrMsg + AsmError);
317
318 return Ctx.emitError(I, ErrMsg);
319}
320
321/// getCopyFromPartsVector - Create a value that contains the specified legal
322/// parts combined into the value they represent. If the parts combine to a
323/// type larger than ValueVT then AssertOp can be used to specify whether the
324/// extra bits are known to be zero (ISD::AssertZext) or sign extended from
325/// ValueVT (ISD::AssertSext).
326static SDValue getCopyFromPartsVector(SelectionDAG &DAG, const SDLoc &DL,
327 const SDValue *Parts, unsigned NumParts,
328 MVT PartVT, EVT ValueVT, const Value *V,
329 Optional<CallingConv::ID> CallConv) {
330 assert(ValueVT.isVector() && "Not a vector value");
331 assert(NumParts > 0 && "No parts to assemble!");
332 const bool IsABIRegCopy = CallConv.hasValue();
333
334 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
335 SDValue Val = Parts[0];
336
337 // Handle a multi-element vector.
338 if (NumParts > 1) {
339 EVT IntermediateVT;
340 MVT RegisterVT;
341 unsigned NumIntermediates;
342 unsigned NumRegs;
343
344 if (IsABIRegCopy) {
345 NumRegs = TLI.getVectorTypeBreakdownForCallingConv(
346 *DAG.getContext(), CallConv.getValue(), ValueVT, IntermediateVT,
347 NumIntermediates, RegisterVT);
348 } else {
349 NumRegs =
350 TLI.getVectorTypeBreakdown(*DAG.getContext(), ValueVT, IntermediateVT,
351 NumIntermediates, RegisterVT);
352 }
353
354 assert(NumRegs == NumParts && "Part count doesn't match vector breakdown!");
355 NumParts = NumRegs; // Silence a compiler warning.
356 assert(RegisterVT == PartVT && "Part type doesn't match vector breakdown!");
357 assert(RegisterVT.getSizeInBits() ==
358 Parts[0].getSimpleValueType().getSizeInBits() &&
359 "Part type sizes don't match!");
360
361 // Assemble the parts into intermediate operands.
362 SmallVector<SDValue, 8> Ops(NumIntermediates);
363 if (NumIntermediates == NumParts) {
364 // If the register was not expanded, truncate or copy the value,
365 // as appropriate.
366 for (unsigned i = 0; i != NumParts; ++i)
367 Ops[i] = getCopyFromParts(DAG, DL, &Parts[i], 1,
368 PartVT, IntermediateVT, V, CallConv);
369 } else if (NumParts > 0) {
370 // If the intermediate type was expanded, build the intermediate
371 // operands from the parts.
372 assert(NumParts % NumIntermediates == 0 &&
373 "Must expand into a divisible number of parts!");
374 unsigned Factor = NumParts / NumIntermediates;
375 for (unsigned i = 0; i != NumIntermediates; ++i)
376 Ops[i] = getCopyFromParts(DAG, DL, &Parts[i * Factor], Factor,
377 PartVT, IntermediateVT, V, CallConv);
378 }
379
380 // Build a vector with BUILD_VECTOR or CONCAT_VECTORS from the
381 // intermediate operands.
382 EVT BuiltVectorTy =
383 IntermediateVT.isVector()
384 ? EVT::getVectorVT(
385 *DAG.getContext(), IntermediateVT.getScalarType(),
386 IntermediateVT.getVectorElementCount() * NumParts)
387 : EVT::getVectorVT(*DAG.getContext(),
388 IntermediateVT.getScalarType(),
389 NumIntermediates);
390 Val = DAG.getNode(IntermediateVT.isVector() ? ISD::CONCAT_VECTORS
391 : ISD::BUILD_VECTOR,
392 DL, BuiltVectorTy, Ops);
393 }
394
395 // There is now one part, held in Val. Correct it to match ValueVT.
396 EVT PartEVT = Val.getValueType();
397
398 if (PartEVT == ValueVT)
399 return Val;
400
401 if (PartEVT.isVector()) {
402 // If the element type of the source/dest vectors are the same, but the
403 // parts vector has more elements than the value vector, then we have a
404 // vector widening case (e.g. <2 x float> -> <4 x float>). Extract the
405 // elements we want.
406 if (PartEVT.getVectorElementType() == ValueVT.getVectorElementType()) {
407 assert((PartEVT.getVectorElementCount().getKnownMinValue() >
408 ValueVT.getVectorElementCount().getKnownMinValue()) &&
409 (PartEVT.getVectorElementCount().isScalable() ==
410 ValueVT.getVectorElementCount().isScalable()) &&
411 "Cannot narrow, it would be a lossy transformation");
412 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, ValueVT, Val,
413 DAG.getVectorIdxConstant(0, DL));
414 }
415
416 // Vector/Vector bitcast.
417 if (ValueVT.getSizeInBits() == PartEVT.getSizeInBits())
418 return DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);
419
420 assert(PartEVT.getVectorElementCount() == ValueVT.getVectorElementCount() &&
421 "Cannot handle this kind of promotion");
422 // Promoted vector extract
423 return DAG.getAnyExtOrTrunc(Val, DL, ValueVT);
424
425 }
426
427 // Trivial bitcast if the types are the same size and the destination
428 // vector type is legal.
429 if (PartEVT.getSizeInBits() == ValueVT.getSizeInBits() &&
430 TLI.isTypeLegal(ValueVT))
431 return DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);
432
433 if (ValueVT.getVectorNumElements() != 1) {
434 // Certain ABIs require that vectors are passed as integers. For vectors
435 // are the same size, this is an obvious bitcast.
436 if (ValueVT.getSizeInBits() == PartEVT.getSizeInBits()) {
437 return DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);
438 } else if (ValueVT.bitsLT(PartEVT)) {
439 // Bitcast Val back the original type and extract the corresponding
440 // vector we want.
441 unsigned Elts = PartEVT.getSizeInBits() / ValueVT.getScalarSizeInBits();
442 EVT WiderVecType = EVT::getVectorVT(*DAG.getContext(),
443 ValueVT.getVectorElementType(), Elts);
444 Val = DAG.getBitcast(WiderVecType, Val);
445 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, ValueVT, Val,
446 DAG.getVectorIdxConstant(0, DL));
447 }
448
449 diagnosePossiblyInvalidConstraint(
450 *DAG.getContext(), V, "non-trivial scalar-to-vector conversion");
451 return DAG.getUNDEF(ValueVT);
452 }
453
454 // Handle cases such as i8 -> <1 x i1>
455 EVT ValueSVT = ValueVT.getVectorElementType();
456 if (ValueVT.getVectorNumElements() == 1 && ValueSVT != PartEVT) {
457 if (ValueSVT.getSizeInBits() == PartEVT.getSizeInBits())
458 Val = DAG.getNode(ISD::BITCAST, DL, ValueSVT, Val);
459 else
460 Val = ValueVT.isFloatingPoint()
461 ? DAG.getFPExtendOrRound(Val, DL, ValueSVT)
462 : DAG.getAnyExtOrTrunc(Val, DL, ValueSVT);
463 }
464
465 return DAG.getBuildVector(ValueVT, DL, Val);
466}
467
468static void getCopyToPartsVector(SelectionDAG &DAG, const SDLoc &dl,
469 SDValue Val, SDValue *Parts, unsigned NumParts,
470 MVT PartVT, const Value *V,
471 Optional<CallingConv::ID> CallConv);
472
473/// getCopyToParts - Create a series of nodes that contain the specified value
474/// split into legal parts. If the parts contain more bits than Val, then, for
475/// integers, ExtendKind can be used to specify how to generate the extra bits.
476static void getCopyToParts(SelectionDAG &DAG, const SDLoc &DL, SDValue Val,
477 SDValue *Parts, unsigned NumParts, MVT PartVT,
478 const Value *V,
479 Optional<CallingConv::ID> CallConv = None,
480 ISD::NodeType ExtendKind = ISD::ANY_EXTEND) {
481 // Let the target split the parts if it wants to
482 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
483 if (TLI.splitValueIntoRegisterParts(DAG, DL, Val, Parts, NumParts, PartVT,
484 CallConv))
485 return;
486 EVT ValueVT = Val.getValueType();
487
488 // Handle the vector case separately.
489 if (ValueVT.isVector())
490 return getCopyToPartsVector(DAG, DL, Val, Parts, NumParts, PartVT, V,
491 CallConv);
492
493 unsigned PartBits = PartVT.getSizeInBits();
494 unsigned OrigNumParts = NumParts;
495 assert(DAG.getTargetLoweringInfo().isTypeLegal(PartVT) &&
496 "Copying to an illegal type!");
497
498 if (NumParts == 0)
499 return;
500
501 assert(!ValueVT.isVector() && "Vector case handled elsewhere");
502 EVT PartEVT = PartVT;
503 if (PartEVT == ValueVT) {
504 assert(NumParts == 1 && "No-op copy with multiple parts!");
505 Parts[0] = Val;
506 return;
507 }
508
509 if (NumParts * PartBits > ValueVT.getSizeInBits()) {
510 // If the parts cover more bits than the value has, promote the value.
511 if (PartVT.isFloatingPoint() && ValueVT.isFloatingPoint()) {
512 assert(NumParts == 1 && "Do not know what to promote to!");
513 Val = DAG.getNode(ISD::FP_EXTEND, DL, PartVT, Val);
514 } else {
515 if (ValueVT.isFloatingPoint()) {
516 // FP values need to be bitcast, then extended if they are being put
517 // into a larger container.
518 ValueVT = EVT::getIntegerVT(*DAG.getContext(), ValueVT.getSizeInBits());
519 Val = DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);
520 }
521 assert((PartVT.isInteger() || PartVT == MVT::x86mmx) &&
522 ValueVT.isInteger() &&
523 "Unknown mismatch!");
524 ValueVT = EVT::getIntegerVT(*DAG.getContext(), NumParts * PartBits);
525 Val = DAG.getNode(ExtendKind, DL, ValueVT, Val);
526 if (PartVT == MVT::x86mmx)
527 Val = DAG.getNode(ISD::BITCAST, DL, PartVT, Val);
528 }
529 } else if (PartBits == ValueVT.getSizeInBits()) {
530 // Different types of the same size.
531 assert(NumParts == 1 && PartEVT != ValueVT);
532 Val = DAG.getNode(ISD::BITCAST, DL, PartVT, Val);
533 } else if (NumParts * PartBits < ValueVT.getSizeInBits()) {
534 // If the parts cover less bits than value has, truncate the value.
535 assert((PartVT.isInteger() || PartVT == MVT::x86mmx) &&
536 ValueVT.isInteger() &&
537 "Unknown mismatch!");
538 ValueVT = EVT::getIntegerVT(*DAG.getContext(), NumParts * PartBits);
539 Val = DAG.getNode(ISD::TRUNCATE, DL, ValueVT, Val);
540 if (PartVT == MVT::x86mmx)
541 Val = DAG.getNode(ISD::BITCAST, DL, PartVT, Val);
542 }
543
544 // The value may have changed - recompute ValueVT.
545 ValueVT = Val.getValueType();
546 assert(NumParts * PartBits == ValueVT.getSizeInBits() &&
547 "Failed to tile the value with PartVT!");
548
549 if (NumParts == 1) {
550 if (PartEVT != ValueVT) {
551 diagnosePossiblyInvalidConstraint(*DAG.getContext(), V,
552 "scalar-to-vector conversion failed");
553 Val = DAG.getNode(ISD::BITCAST, DL, PartVT, Val);
554 }
555
556 Parts[0] = Val;
557 return;
558 }
559
560 // Expand the value into multiple parts.
561 if (NumParts & (NumParts - 1)) {
562 // The number of parts is not a power of 2. Split off and copy the tail.
563 assert(PartVT.isInteger() && ValueVT.isInteger() &&
564 "Do not know what to expand to!");
565 unsigned RoundParts = 1 << Log2_32(NumParts);
566 unsigned RoundBits = RoundParts * PartBits;
567 unsigned OddParts = NumParts - RoundParts;
568 SDValue OddVal = DAG.getNode(ISD::SRL, DL, ValueVT, Val,
569 DAG.getShiftAmountConstant(RoundBits, ValueVT, DL, /*LegalTypes*/false));
570
571 getCopyToParts(DAG, DL, OddVal, Parts + RoundParts, OddParts, PartVT, V,
572 CallConv);
573
574 if (DAG.getDataLayout().isBigEndian())
575 // The odd parts were reversed by getCopyToParts - unreverse them.
576 std::reverse(Parts + RoundParts, Parts + NumParts);
577
578 NumParts = RoundParts;
579 ValueVT = EVT::getIntegerVT(*DAG.getContext(), NumParts * PartBits);
580 Val = DAG.getNode(ISD::TRUNCATE, DL, ValueVT, Val);
581 }
582
583 // The number of parts is a power of 2. Repeatedly bisect the value using
584 // EXTRACT_ELEMENT.
585 Parts[0] = DAG.getNode(ISD::BITCAST, DL,
586 EVT::getIntegerVT(*DAG.getContext(),
587 ValueVT.getSizeInBits()),
588 Val);
589
590 for (unsigned StepSize = NumParts; StepSize > 1; StepSize /= 2) {
591 for (unsigned i = 0; i < NumParts; i += StepSize) {
592 unsigned ThisBits = StepSize * PartBits / 2;
593 EVT ThisVT = EVT::getIntegerVT(*DAG.getContext(), ThisBits);
594 SDValue &Part0 = Parts[i];
595 SDValue &Part1 = Parts[i+StepSize/2];
596
597 Part1 = DAG.getNode(ISD::EXTRACT_ELEMENT, DL,
598 ThisVT, Part0, DAG.getIntPtrConstant(1, DL));
599 Part0 = DAG.getNode(ISD::EXTRACT_ELEMENT, DL,
600 ThisVT, Part0, DAG.getIntPtrConstant(0, DL));
601
602 if (ThisBits == PartBits && ThisVT != PartVT) {
603 Part0 = DAG.getNode(ISD::BITCAST, DL, PartVT, Part0);
604 Part1 = DAG.getNode(ISD::BITCAST, DL, PartVT, Part1);
605 }
606 }
607 }
608
609 if (DAG.getDataLayout().isBigEndian())
610 std::reverse(Parts, Parts + OrigNumParts);
611}
612
613static SDValue widenVectorToPartType(SelectionDAG &DAG,
614 SDValue Val, const SDLoc &DL, EVT PartVT) {
615 if (!PartVT.isFixedLengthVector())
616 return SDValue();
617
618 EVT ValueVT = Val.getValueType();
619 unsigned PartNumElts = PartVT.getVectorNumElements();
620 unsigned ValueNumElts = ValueVT.getVectorNumElements();
621 if (PartNumElts > ValueNumElts &&
622 PartVT.getVectorElementType() == ValueVT.getVectorElementType()) {
623 EVT ElementVT = PartVT.getVectorElementType();
624 // Vector widening case, e.g. <2 x float> -> <4 x float>. Shuffle in
625 // undef elements.
626 SmallVector<SDValue, 16> Ops;
627 DAG.ExtractVectorElements(Val, Ops);
628 SDValue EltUndef = DAG.getUNDEF(ElementVT);
629 for (unsigned i = ValueNumElts, e = PartNumElts; i != e; ++i)
630 Ops.push_back(EltUndef);
631
632 // FIXME: Use CONCAT for 2x -> 4x.
633 return DAG.getBuildVector(PartVT, DL, Ops);
634 }
635
636 return SDValue();
637}
638
639/// getCopyToPartsVector - Create a series of nodes that contain the specified
640/// value split into legal parts.
641static void getCopyToPartsVector(SelectionDAG &DAG, const SDLoc &DL,
642 SDValue Val, SDValue *Parts, unsigned NumParts,
643 MVT PartVT, const Value *V,
644 Optional<CallingConv::ID> CallConv) {
645 EVT ValueVT = Val.getValueType();
646 assert(ValueVT.isVector() && "Not a vector");
647 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
648 const bool IsABIRegCopy = CallConv.hasValue();
649
650 if (NumParts == 1) {
651 EVT PartEVT = PartVT;
652 if (PartEVT == ValueVT) {
653 // Nothing to do.
654 } else if (PartVT.getSizeInBits() == ValueVT.getSizeInBits()) {
655 // Bitconvert vector->vector case.
656 Val = DAG.getNode(ISD::BITCAST, DL, PartVT, Val);
657 } else if (SDValue Widened = widenVectorToPartType(DAG, Val, DL, PartVT)) {
658 Val = Widened;
659 } else if (PartVT.isVector() &&
660 PartEVT.getVectorElementType().bitsGE(
661 ValueVT.getVectorElementType()) &&
662 PartEVT.getVectorElementCount() ==
663 ValueVT.getVectorElementCount()) {
664
665 // Promoted vector extract
666 Val = DAG.getAnyExtOrTrunc(Val, DL, PartVT);
667 } else {
668 if (ValueVT.getVectorElementCount().isScalar()) {
669 Val = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, PartVT, Val,
670 DAG.getVectorIdxConstant(0, DL));
671 } else {
672 uint64_t ValueSize = ValueVT.getFixedSizeInBits();
673 assert(PartVT.getFixedSizeInBits() > ValueSize &&
674 "lossy conversion of vector to scalar type");
675 EVT IntermediateType = EVT::getIntegerVT(*DAG.getContext(), ValueSize);
676 Val = DAG.getBitcast(IntermediateType, Val);
677 Val = DAG.getAnyExtOrTrunc(Val, DL, PartVT);
678 }
679 }
680
681 assert(Val.getValueType() == PartVT && "Unexpected vector part value type");
682 Parts[0] = Val;
683 return;
684 }
685
686 // Handle a multi-element vector.
687 EVT IntermediateVT;
688 MVT RegisterVT;
689 unsigned NumIntermediates;
690 unsigned NumRegs;
691 if (IsABIRegCopy) {
692 NumRegs = TLI.getVectorTypeBreakdownForCallingConv(
693 *DAG.getContext(), CallConv.getValue(), ValueVT, IntermediateVT,
694 NumIntermediates, RegisterVT);
695 } else {
696 NumRegs =
697 TLI.getVectorTypeBreakdown(*DAG.getContext(), ValueVT, IntermediateVT,
698 NumIntermediates, RegisterVT);
699 }
700
701 assert(NumRegs == NumParts && "Part count doesn't match vector breakdown!");
702 NumParts = NumRegs; // Silence a compiler warning.
703 assert(RegisterVT == PartVT && "Part type doesn't match vector breakdown!");
704
705 assert(IntermediateVT.isScalableVector() == ValueVT.isScalableVector() &&
706 "Mixing scalable and fixed vectors when copying in parts");
707
708 Optional<ElementCount> DestEltCnt;
709
710 if (IntermediateVT.isVector())
711 DestEltCnt = IntermediateVT.getVectorElementCount() * NumIntermediates;
712 else
713 DestEltCnt = ElementCount::getFixed(NumIntermediates);
714
715 EVT BuiltVectorTy = EVT::getVectorVT(
716 *DAG.getContext(), IntermediateVT.getScalarType(), DestEltCnt.getValue());
717 if (ValueVT != BuiltVectorTy) {
718 if (SDValue Widened = widenVectorToPartType(DAG, Val, DL, BuiltVectorTy))
719 Val = Widened;
720
721 Val = DAG.getNode(ISD::BITCAST, DL, BuiltVectorTy, Val);
722 }
723
724 // Split the vector into intermediate operands.
725 SmallVector<SDValue, 8> Ops(NumIntermediates);
726 for (unsigned i = 0; i != NumIntermediates; ++i) {
727 if (IntermediateVT.isVector()) {
728 // This does something sensible for scalable vectors - see the
729 // definition of EXTRACT_SUBVECTOR for further details.
730 unsigned IntermediateNumElts = IntermediateVT.getVectorMinNumElements();
731 Ops[i] =
732 DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, IntermediateVT, Val,
733 DAG.getVectorIdxConstant(i * IntermediateNumElts, DL));
734 } else {
735 Ops[i] = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, IntermediateVT, Val,
736 DAG.getVectorIdxConstant(i, DL));
737 }
738 }
739
740 // Split the intermediate operands into legal parts.
741 if (NumParts == NumIntermediates) {
742 // If the register was not expanded, promote or copy the value,
743 // as appropriate.
744 for (unsigned i = 0; i != NumParts; ++i)
745 getCopyToParts(DAG, DL, Ops[i], &Parts[i], 1, PartVT, V, CallConv);
746 } else if (NumParts > 0) {
747 // If the intermediate type was expanded, split each the value into
748 // legal parts.
749 assert(NumIntermediates != 0 && "division by zero");
750 assert(NumParts % NumIntermediates == 0 &&
751 "Must expand into a divisible number of parts!");
752 unsigned Factor = NumParts / NumIntermediates;
753 for (unsigned i = 0; i != NumIntermediates; ++i)
754 getCopyToParts(DAG, DL, Ops[i], &Parts[i * Factor], Factor, PartVT, V,
755 CallConv);
756 }
757}
758
759RegsForValue::RegsForValue(const SmallVector<unsigned, 4> &regs, MVT regvt,
760 EVT valuevt, Optional<CallingConv::ID> CC)
761 : ValueVTs(1, valuevt), RegVTs(1, regvt), Regs(regs),
762 RegCount(1, regs.size()), CallConv(CC) {}
763
764RegsForValue::RegsForValue(LLVMContext &Context, const TargetLowering &TLI,
765 const DataLayout &DL, unsigned Reg, Type *Ty,
766 Optional<CallingConv::ID> CC) {
767 ComputeValueVTs(TLI, DL, Ty, ValueVTs);
768
769 CallConv = CC;
770
771 for (EVT ValueVT : ValueVTs) {
772 unsigned NumRegs =
773 isABIMangled()
774 ? TLI.getNumRegistersForCallingConv(Context, CC.getValue(), ValueVT)
775 : TLI.getNumRegisters(Context, ValueVT);
776 MVT RegisterVT =
777 isABIMangled()
778 ? TLI.getRegisterTypeForCallingConv(Context, CC.getValue(), ValueVT)
779 : TLI.getRegisterType(Context, ValueVT);
780 for (unsigned i = 0; i != NumRegs; ++i)
781 Regs.push_back(Reg + i);
782 RegVTs.push_back(RegisterVT);
783 RegCount.push_back(NumRegs);
784 Reg += NumRegs;
785 }
786}
787
788SDValue RegsForValue::getCopyFromRegs(SelectionDAG &DAG,
789 FunctionLoweringInfo &FuncInfo,
790 const SDLoc &dl, SDValue &Chain,
791 SDValue *Flag, const Value *V) const {
792 // A Value with type {} or [0 x %t] needs no registers.
793 if (ValueVTs.empty())
794 return SDValue();
795
796 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
797
798 // Assemble the legal parts into the final values.
799 SmallVector<SDValue, 4> Values(ValueVTs.size());
800 SmallVector<SDValue, 8> Parts;
801 for (unsigned Value = 0, Part = 0, e = ValueVTs.size(); Value != e; ++Value) {
802 // Copy the legal parts from the registers.
803 EVT ValueVT = ValueVTs[Value];
804 unsigned NumRegs = RegCount[Value];
805 MVT RegisterVT = isABIMangled() ? TLI.getRegisterTypeForCallingConv(
806 *DAG.getContext(),
807 CallConv.getValue(), RegVTs[Value])
808 : RegVTs[Value];
809
810 Parts.resize(NumRegs);
811 for (unsigned i = 0; i != NumRegs; ++i) {
812 SDValue P;
813 if (!Flag) {
814 P = DAG.getCopyFromReg(Chain, dl, Regs[Part+i], RegisterVT);
815 } else {
816 P = DAG.getCopyFromReg(Chain, dl, Regs[Part+i], RegisterVT, *Flag);
817 *Flag = P.getValue(2);
818 }
819
820 Chain = P.getValue(1);
821 Parts[i] = P;
822
823 // If the source register was virtual and if we know something about it,
824 // add an assert node.
825 if (!Register::isVirtualRegister(Regs[Part + i]) ||
826 !RegisterVT.isInteger())
827 continue;
828
829 const FunctionLoweringInfo::LiveOutInfo *LOI =
830 FuncInfo.GetLiveOutRegInfo(Regs[Part+i]);
831 if (!LOI)
832 continue;
833
834 unsigned RegSize = RegisterVT.getScalarSizeInBits();
835 unsigned NumSignBits = LOI->NumSignBits;
836 unsigned NumZeroBits = LOI->Known.countMinLeadingZeros();
837
838 if (NumZeroBits == RegSize) {
839 // The current value is a zero.
840 // Explicitly express that as it would be easier for
841 // optimizations to kick in.
842 Parts[i] = DAG.getConstant(0, dl, RegisterVT);
843 continue;
844 }
845
846 // FIXME: We capture more information than the dag can represent. For
847 // now, just use the tightest assertzext/assertsext possible.
848 bool isSExt;
849 EVT FromVT(MVT::Other);
850 if (NumZeroBits) {
851 FromVT = EVT::getIntegerVT(*DAG.getContext(), RegSize - NumZeroBits);
852 isSExt = false;
853 } else if (NumSignBits > 1) {
854 FromVT =
855 EVT::getIntegerVT(*DAG.getContext(), RegSize - NumSignBits + 1);
856 isSExt = true;
857 } else {
858 continue;
859 }
860 // Add an assertion node.
861 assert(FromVT != MVT::Other);
862 Parts[i] = DAG.getNode(isSExt ? ISD::AssertSext : ISD::AssertZext, dl,
863 RegisterVT, P, DAG.getValueType(FromVT));
864 }
865
866 Values[Value] = getCopyFromParts(DAG, dl, Parts.begin(), NumRegs,
867 RegisterVT, ValueVT, V, CallConv);
868 Part += NumRegs;
869 Parts.clear();
870 }
871
872 return DAG.getNode(ISD::MERGE_VALUES, dl, DAG.getVTList(ValueVTs), Values);
873}
874
875void RegsForValue::getCopyToRegs(SDValue Val, SelectionDAG &DAG,
876 const SDLoc &dl, SDValue &Chain, SDValue *Flag,
877 const Value *V,
878 ISD::NodeType PreferredExtendType) const {
879 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
880 ISD::NodeType ExtendKind = PreferredExtendType;
881
882 // Get the list of the values's legal parts.
883 unsigned NumRegs = Regs.size();
884 SmallVector<SDValue, 8> Parts(NumRegs);
885 for (unsigned Value = 0, Part = 0, e = ValueVTs.size(); Value != e; ++Value) {
886 unsigned NumParts = RegCount[Value];
887
888 MVT RegisterVT = isABIMangled() ? TLI.getRegisterTypeForCallingConv(
889 *DAG.getContext(),
890 CallConv.getValue(), RegVTs[Value])
891 : RegVTs[Value];
892
893 if (ExtendKind == ISD::ANY_EXTEND && TLI.isZExtFree(Val, RegisterVT))
894 ExtendKind = ISD::ZERO_EXTEND;
895
896 getCopyToParts(DAG, dl, Val.getValue(Val.getResNo() + Value), &Parts[Part],
897 NumParts, RegisterVT, V, CallConv, ExtendKind);
898 Part += NumParts;
899 }
900
901 // Copy the parts into the registers.
902 SmallVector<SDValue, 8> Chains(NumRegs);
903 for (unsigned i = 0; i != NumRegs; ++i) {
904 SDValue Part;
905 if (!Flag) {
906 Part = DAG.getCopyToReg(Chain, dl, Regs[i], Parts[i]);
907 } else {
908 Part = DAG.getCopyToReg(Chain, dl, Regs[i], Parts[i], *Flag);
909 *Flag = Part.getValue(1);
910 }
911
912 Chains[i] = Part.getValue(0);
913 }
914
915 if (NumRegs == 1 || Flag)
916 // If NumRegs > 1 && Flag is used then the use of the last CopyToReg is
917 // flagged to it. That is the CopyToReg nodes and the user are considered
918 // a single scheduling unit. If we create a TokenFactor and return it as
919 // chain, then the TokenFactor is both a predecessor (operand) of the
920 // user as well as a successor (the TF operands are flagged to the user).
921 // c1, f1 = CopyToReg
922 // c2, f2 = CopyToReg
923 // c3 = TokenFactor c1, c2
924 // ...
925 // = op c3, ..., f2
926 Chain = Chains[NumRegs-1];
927 else
928 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Chains);
929}
930
931void RegsForValue::AddInlineAsmOperands(unsigned Code, bool HasMatching,
932 unsigned MatchingIdx, const SDLoc &dl,
933 SelectionDAG &DAG,
934 std::vector<SDValue> &Ops) const {
935 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
936
937 unsigned Flag = InlineAsm::getFlagWord(Code, Regs.size());
938 if (HasMatching)
939 Flag = InlineAsm::getFlagWordForMatchingOp(Flag, MatchingIdx);
940 else if (!Regs.empty() && Register::isVirtualRegister(Regs.front())) {
941 // Put the register class of the virtual registers in the flag word. That
942 // way, later passes can recompute register class constraints for inline
943 // assembly as well as normal instructions.
944 // Don't do this for tied operands that can use the regclass information
945 // from the def.
946 const MachineRegisterInfo &MRI = DAG.getMachineFunction().getRegInfo();
947 const TargetRegisterClass *RC = MRI.getRegClass(Regs.front());
948 Flag = InlineAsm::getFlagWordForRegClass(Flag, RC->getID());
949 }
950
951 SDValue Res = DAG.getTargetConstant(Flag, dl, MVT::i32);
952 Ops.push_back(Res);
953
954 if (Code == InlineAsm::Kind_Clobber) {
955 // Clobbers should always have a 1:1 mapping with registers, and may
956 // reference registers that have illegal (e.g. vector) types. Hence, we
957 // shouldn't try to apply any sort of splitting logic to them.
958 assert(Regs.size() == RegVTs.size() && Regs.size() == ValueVTs.size() &&
959 "No 1:1 mapping from clobbers to regs?");
960 Register SP = TLI.getStackPointerRegisterToSaveRestore();
961 (void)SP;
962 for (unsigned I = 0, E = ValueVTs.size(); I != E; ++I) {
963 Ops.push_back(DAG.getRegister(Regs[I], RegVTs[I]));
964 assert(
965 (Regs[I] != SP ||
966 DAG.getMachineFunction().getFrameInfo().hasOpaqueSPAdjustment()) &&
967 "If we clobbered the stack pointer, MFI should know about it.");
968 }
969 return;
970 }
971
972 for (unsigned Value = 0, Reg = 0, e = ValueVTs.size(); Value != e; ++Value) {
973 unsigned NumRegs = TLI.getNumRegisters(*DAG.getContext(), ValueVTs[Value]);
974 MVT RegisterVT = RegVTs[Value];
975 for (unsigned i = 0; i != NumRegs; ++i) {
976 assert(Reg < Regs.size() && "Mismatch in # registers expected");
977 unsigned TheReg = Regs[Reg++];
978 Ops.push_back(DAG.getRegister(TheReg, RegisterVT));
979 }
980 }
981}
982
983SmallVector<std::pair<unsigned, TypeSize>, 4>
984RegsForValue::getRegsAndSizes() const {
985 SmallVector<std::pair<unsigned, TypeSize>, 4> OutVec;
986 unsigned I = 0;
987 for (auto CountAndVT : zip_first(RegCount, RegVTs)) {
988 unsigned RegCount = std::get<0>(CountAndVT);
989 MVT RegisterVT = std::get<1>(CountAndVT);
990 TypeSize RegisterSize = RegisterVT.getSizeInBits();
991 for (unsigned E = I + RegCount; I != E; ++I)
992 OutVec.push_back(std::make_pair(Regs[I], RegisterSize));
993 }
994 return OutVec;
995}
996
997void SelectionDAGBuilder::init(GCFunctionInfo *gfi, AliasAnalysis *aa,
998 const TargetLibraryInfo *li) {
999 AA = aa;
1000 GFI = gfi;
1001 LibInfo = li;
1002 DL = &DAG.getDataLayout();
1003 Context = DAG.getContext();
1004 LPadToCallSiteMap.clear();
1005 SL->init(DAG.getTargetLoweringInfo(), TM, DAG.getDataLayout());
1006}
1007
1008void SelectionDAGBuilder::clear() {
1009 NodeMap.clear();
1010 UnusedArgNodeMap.clear();
1011 PendingLoads.clear();
1012 PendingExports.clear();
1013 PendingConstrainedFP.clear();
1014 PendingConstrainedFPStrict.clear();
1015 CurInst = nullptr;
1016 HasTailCall = false;
1017 SDNodeOrder = LowestSDNodeOrder;
1018 StatepointLowering.clear();
1019}
1020
1021void SelectionDAGBuilder::clearDanglingDebugInfo() {
1022 DanglingDebugInfoMap.clear();
1023}
1024
1025// Update DAG root to include dependencies on Pending chains.
1026SDValue SelectionDAGBuilder::updateRoot(SmallVectorImpl<SDValue> &Pending) {
1027 SDValue Root = DAG.getRoot();
1028
1029 if (Pending.empty())
1030 return Root;
1031
1032 // Add current root to PendingChains, unless we already indirectly
1033 // depend on it.
1034 if (Root.getOpcode() != ISD::EntryToken) {
1035 unsigned i = 0, e = Pending.size();
1036 for (; i != e; ++i) {
1037 assert(Pending[i].getNode()->getNumOperands() > 1);
1038 if (Pending[i].getNode()->getOperand(0) == Root)
1039 break; // Don't add the root if we already indirectly depend on it.
1040 }
1041
1042 if (i == e)
1043 Pending.push_back(Root);
1044 }
1045
1046 if (Pending.size() == 1)
1047 Root = Pending[0];
1048 else
1049 Root = DAG.getTokenFactor(getCurSDLoc(), Pending);
1050
1051 DAG.setRoot(Root);
1052 Pending.clear();
1053 return Root;
1054}
1055
1056SDValue SelectionDAGBuilder::getMemoryRoot() {
1057 return updateRoot(PendingLoads);
1058}
1059
1060SDValue SelectionDAGBuilder::getRoot() {
1061 // Chain up all pending constrained intrinsics together with all
1062 // pending loads, by simply appending them to PendingLoads and
1063 // then calling getMemoryRoot().
1064 PendingLoads.reserve(PendingLoads.size() +
1065 PendingConstrainedFP.size() +
1066 PendingConstrainedFPStrict.size());
1067 PendingLoads.append(PendingConstrainedFP.begin(),
1068 PendingConstrainedFP.end());
1069 PendingLoads.append(PendingConstrainedFPStrict.begin(),
1070 PendingConstrainedFPStrict.end());
1071 PendingConstrainedFP.clear();
1072 PendingConstrainedFPStrict.clear();
1073 return getMemoryRoot();
1074}
1075
1076SDValue SelectionDAGBuilder::getControlRoot() {
1077 // We need to emit pending fpexcept.strict constrained intrinsics,
1078 // so append them to the PendingExports list.
1079 PendingExports.append(PendingConstrainedFPStrict.begin(),
1080 PendingConstrainedFPStrict.end());
1081 PendingConstrainedFPStrict.clear();
1082 return updateRoot(PendingExports);
1083}
1084
1085void SelectionDAGBuilder::visit(const Instruction &I) {
1086 // Set up outgoing PHI node register values before emitting the terminator.
1087 if (I.isTerminator()) {
1088 HandlePHINodesInSuccessorBlocks(I.getParent());
1089 }
1090
1091 // Increase the SDNodeOrder if dealing with a non-debug instruction.
1092 if (!isa<DbgInfoIntrinsic>(I))
1093 ++SDNodeOrder;
1094
1095 CurInst = &I;
1096
1097 visit(I.getOpcode(), I);
1098
1099 if (!I.isTerminator() && !HasTailCall &&
1100 !isa<GCStatepointInst>(I)) // statepoints handle their exports internally
1101 CopyToExportRegsIfNeeded(&I);
1102
1103 CurInst = nullptr;
1104}
1105
1106void SelectionDAGBuilder::visitPHI(const PHINode &) {
1107 llvm_unreachable("SelectionDAGBuilder shouldn't visit PHI nodes!");
1108}
1109
1110void SelectionDAGBuilder::visit(unsigned Opcode, const User &I) {
1111 // Note: this doesn't use InstVisitor, because it has to work with
1112 // ConstantExpr's in addition to instructions.
1113 switch (Opcode) {
1114 default: llvm_unreachable("Unknown instruction type encountered!");
1115 // Build the switch statement using the Instruction.def file.
1116#define HANDLE_INST(NUM, OPCODE, CLASS) \
1117 case Instruction::OPCODE: visit##OPCODE((const CLASS&)I); break;
1118#include "llvm/IR/Instruction.def"
1119 }
1120}
1121
1122void SelectionDAGBuilder::dropDanglingDebugInfo(const DILocalVariable *Variable,
1123 const DIExpression *Expr) {
1124 auto isMatchingDbgValue = [&](DanglingDebugInfo &DDI) {
1125 const DbgValueInst *DI = DDI.getDI();
1126 DIVariable *DanglingVariable = DI->getVariable();
1127 DIExpression *DanglingExpr = DI->getExpression();
1128 if (DanglingVariable == Variable && Expr->fragmentsOverlap(DanglingExpr)) {
1129 LLVM_DEBUG(dbgs() << "Dropping dangling debug info for " << *DI << "\n");
1130 return true;
1131 }
1132 return false;
1133 };
1134
1135 for (auto &DDIMI : DanglingDebugInfoMap) {
1136 DanglingDebugInfoVector &DDIV = DDIMI.second;
1137
1138 // If debug info is to be dropped, run it through final checks to see
1139 // whether it can be salvaged.
1140 for (auto &DDI : DDIV)
1141 if (isMatchingDbgValue(DDI))
1142 salvageUnresolvedDbgValue(DDI);
1143
1144 erase_if(DDIV, isMatchingDbgValue);
1145 }
1146}
1147
1148// resolveDanglingDebugInfo - if we saw an earlier dbg_value referring to V,
1149// generate the debug data structures now that we've seen its definition.
1150void SelectionDAGBuilder::resolveDanglingDebugInfo(const Value *V,
1151 SDValue Val) {
1152 auto DanglingDbgInfoIt = DanglingDebugInfoMap.find(V);
1153 if (DanglingDbgInfoIt == DanglingDebugInfoMap.end())
1154 return;
1155
1156 DanglingDebugInfoVector &DDIV = DanglingDbgInfoIt->second;
1157 for (auto &DDI : DDIV) {
1158 const DbgValueInst *DI = DDI.getDI();
1159 assert(DI && "Ill-formed DanglingDebugInfo");
1160 DebugLoc dl = DDI.getdl();
1161 unsigned ValSDNodeOrder = Val.getNode()->getIROrder();
1162 unsigned DbgSDNodeOrder = DDI.getSDNodeOrder();
1163 DILocalVariable *Variable = DI->getVariable();
1164 DIExpression *Expr = DI->getExpression();
1165 assert(Variable->isValidLocationForIntrinsic(dl) &&
1166 "Expected inlined-at fields to agree");
1167 SDDbgValue *SDV;
1168 if (Val.getNode()) {
1169 // FIXME: I doubt that it is correct to resolve a dangling DbgValue as a
1170 // FuncArgumentDbgValue (it would be hoisted to the function entry, and if
1171 // we couldn't resolve it directly when examining the DbgValue intrinsic
1172 // in the first place we should not be more successful here). Unless we
1173 // have some test case that prove this to be correct we should avoid
1174 // calling EmitFuncArgumentDbgValue here.
1175 if (!EmitFuncArgumentDbgValue(V, Variable, Expr, dl, false, Val)) {
1176 LLVM_DEBUG(dbgs() << "Resolve dangling debug info [order="
1177 << DbgSDNodeOrder << "] for:\n " << *DI << "\n");
1178 LLVM_DEBUG(dbgs() << " By mapping to:\n "; Val.dump());
1179 // Increase the SDNodeOrder for the DbgValue here to make sure it is
1180 // inserted after the definition of Val when emitting the instructions
1181 // after ISel. An alternative could be to teach
1182 // ScheduleDAGSDNodes::EmitSchedule to delay the insertion properly.
1183 LLVM_DEBUG(if (ValSDNodeOrder > DbgSDNodeOrder) dbgs()
1184 << "changing SDNodeOrder from " << DbgSDNodeOrder << " to "
1185 << ValSDNodeOrder << "\n");
1186 SDV = getDbgValue(Val, Variable, Expr, dl,
1187 std::max(DbgSDNodeOrder, ValSDNodeOrder));
1188 DAG.AddDbgValue(SDV, Val.getNode(), false);
1189 } else
1190 LLVM_DEBUG(dbgs() << "Resolved dangling debug info for " << *DI
1191 << "in EmitFuncArgumentDbgValue\n");
1192 } else {
1193 LLVM_DEBUG(dbgs() << "Dropping debug info for " << *DI << "\n");
1194 auto Undef =
1195 UndefValue::get(DDI.getDI()->getVariableLocation()->getType());
1196 auto SDV =
1197 DAG.getConstantDbgValue(Variable, Expr, Undef, dl, DbgSDNodeOrder);
1198 DAG.AddDbgValue(SDV, nullptr, false);
1199 }
1200 }
1201 DDIV.clear();
1202}
1203
1204void SelectionDAGBuilder::salvageUnresolvedDbgValue(DanglingDebugInfo &DDI) {
1205 Value *V = DDI.getDI()->getValue();
1206 DILocalVariable *Var = DDI.getDI()->getVariable();
1207 DIExpression *Expr = DDI.getDI()->getExpression();
1208 DebugLoc DL = DDI.getdl();
1209 DebugLoc InstDL = DDI.getDI()->getDebugLoc();
1210 unsigned SDOrder = DDI.getSDNodeOrder();
1211
1212 // Currently we consider only dbg.value intrinsics -- we tell the salvager
1213 // that DW_OP_stack_value is desired.
1214 assert(isa<DbgValueInst>(DDI.getDI()));
1215 bool StackValue = true;
1216
1217 // Can this Value can be encoded without any further work?
1218 if (handleDebugValue(V, Var, Expr, DL, InstDL, SDOrder))
1219 return;
1220
1221 // Attempt to salvage back through as many instructions as possible. Bail if
1222 // a non-instruction is seen, such as a constant expression or global
1223 // variable. FIXME: Further work could recover those too.
1224 while (isa<Instruction>(V)) {
1225 Instruction &VAsInst = *cast<Instruction>(V);
1226 DIExpression *NewExpr = salvageDebugInfoImpl(VAsInst, Expr, StackValue);
1227
1228 // If we cannot salvage any further, and haven't yet found a suitable debug
1229 // expression, bail out.
1230 if (!NewExpr)
1231 break;
1232
1233 // New value and expr now represent this debuginfo.
1234 V = VAsInst.getOperand(0);
1235 Expr = NewExpr;
1236
1237 // Some kind of simplification occurred: check whether the operand of the
1238 // salvaged debug expression can be encoded in this DAG.
1239 if (handleDebugValue(V, Var, Expr, DL, InstDL, SDOrder)) {
1240 LLVM_DEBUG(dbgs() << "Salvaged debug location info for:\n "
1241 << DDI.getDI() << "\nBy stripping back to:\n " << V);
1242 return;
1243 }
1244 }
1245
1246 // This was the final opportunity to salvage this debug information, and it
1247 // couldn't be done. Place an undef DBG_VALUE at this location to terminate
1248 // any earlier variable location.
1249 auto Undef = UndefValue::get(DDI.getDI()->getVariableLocation()->getType());
1250 auto SDV = DAG.getConstantDbgValue(Var, Expr, Undef, DL, SDNodeOrder);
1251 DAG.AddDbgValue(SDV, nullptr, false);
1252
1253 LLVM_DEBUG(dbgs() << "Dropping debug value info for:\n " << DDI.getDI()
1254 << "\n");
1255 LLVM_DEBUG(dbgs() << " Last seen at:\n " << *DDI.getDI()->getOperand(0)
1256 << "\n");
1257}
1258
1259bool SelectionDAGBuilder::handleDebugValue(const Value *V, DILocalVariable *Var,
1260 DIExpression *Expr, DebugLoc dl,
1261 DebugLoc InstDL, unsigned Order) {
1262 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
1263 SDDbgValue *SDV;
1264 if (isa<ConstantInt>(V) || isa<ConstantFP>(V) || isa<UndefValue>(V) ||
1265 isa<ConstantPointerNull>(V)) {
1266 SDV = DAG.getConstantDbgValue(Var, Expr, V, dl, SDNodeOrder);
1267 DAG.AddDbgValue(SDV, nullptr, false);
1268 return true;
1269 }
1270
1271 // If the Value is a frame index, we can create a FrameIndex debug value
1272 // without relying on the DAG at all.
1273 if (const AllocaInst *AI = dyn_cast<AllocaInst>(V)) {
1274 auto SI = FuncInfo.StaticAllocaMap.find(AI);
1275 if (SI != FuncInfo.StaticAllocaMap.end()) {
1276 auto SDV =
1277 DAG.getFrameIndexDbgValue(Var, Expr, SI->second,
1278 /*IsIndirect*/ false, dl, SDNodeOrder);
1279 // Do not attach the SDNodeDbgValue to an SDNode: this variable location
1280 // is still available even if the SDNode gets optimized out.
1281 DAG.AddDbgValue(SDV, nullptr, false);
1282 return true;
1283 }
1284 }
1285
1286 // Do not use getValue() in here; we don't want to generate code at
1287 // this point if it hasn't been done yet.
1288 SDValue N = NodeMap[V];
1289 if (!N.getNode() && isa<Argument>(V)) // Check unused arguments map.
1290 N = UnusedArgNodeMap[V];
1291 if (N.getNode()) {
1292 if (EmitFuncArgumentDbgValue(V, Var, Expr, dl, false, N))
1293 return true;
1294 SDV = getDbgValue(N, Var, Expr, dl, SDNodeOrder);
1295 DAG.AddDbgValue(SDV, N.getNode(), false);
1296 return true;
1297 }
1298
1299 // Special rules apply for the first dbg.values of parameter variables in a
1300 // function. Identify them by the fact they reference Argument Values, that
1301 // they're parameters, and they are parameters of the current function. We
1302 // need to let them dangle until they get an SDNode.
1303 bool IsParamOfFunc = isa<Argument>(V) && Var->isParameter() &&
1304 !InstDL.getInlinedAt();
1305 if (!IsParamOfFunc) {
1306 // The value is not used in this block yet (or it would have an SDNode).
1307 // We still want the value to appear for the user if possible -- if it has
1308 // an associated VReg, we can refer to that instead.
1309 auto VMI = FuncInfo.ValueMap.find(V);
1310 if (VMI != FuncInfo.ValueMap.end()) {
1311 unsigned Reg = VMI->second;
1312 // If this is a PHI node, it may be split up into several MI PHI nodes
1313 // (in FunctionLoweringInfo::set).
1314 RegsForValue RFV(V->getContext(), TLI, DAG.getDataLayout(), Reg,
1315 V->getType(), None);
1316 if (RFV.occupiesMultipleRegs()) {
1317 unsigned Offset = 0;
1318 unsigned BitsToDescribe = 0;
1319 if (auto VarSize = Var->getSizeInBits())
1320 BitsToDescribe = *VarSize;
1321 if (auto Fragment = Expr->getFragmentInfo())
1322 BitsToDescribe = Fragment->SizeInBits;
1323 for (auto RegAndSize : RFV.getRegsAndSizes()) {
1324 unsigned RegisterSize = RegAndSize.second;
1325 // Bail out if all bits are described already.
1326 if (Offset >= BitsToDescribe)
1327 break;
1328 unsigned FragmentSize = (Offset + RegisterSize > BitsToDescribe)
1329 ? BitsToDescribe - Offset
1330 : RegisterSize;
1331 auto FragmentExpr = DIExpression::createFragmentExpression(
1332 Expr, Offset, FragmentSize);
1333 if (!FragmentExpr)
1334 continue;
1335 SDV = DAG.getVRegDbgValue(Var, *FragmentExpr, RegAndSize.first,
1336 false, dl, SDNodeOrder);
1337 DAG.AddDbgValue(SDV, nullptr, false);
1338 Offset += RegisterSize;
1339 }
1340 } else {
1341 SDV = DAG.getVRegDbgValue(Var, Expr, Reg, false, dl, SDNodeOrder);
1342 DAG.AddDbgValue(SDV, nullptr, false);
1343 }
1344 return true;
1345 }
1346 }
1347
1348 return false;
1349}
1350
1351void SelectionDAGBuilder::resolveOrClearDbgInfo() {
1352 // Try to fixup any remaining dangling debug info -- and drop it if we can't.
1353 for (auto &Pair : DanglingDebugInfoMap)
1354 for (auto &DDI : Pair.second)
1355 salvageUnresolvedDbgValue(DDI);
1356 clearDanglingDebugInfo();
1357}
1358
1359/// getCopyFromRegs - If there was virtual register allocated for the value V
1360/// emit CopyFromReg of the specified type Ty. Return empty SDValue() otherwise.
1361SDValue SelectionDAGBuilder::getCopyFromRegs(const Value *V, Type *Ty) {
1362 DenseMap<const Value *, Register>::iterator It = FuncInfo.ValueMap.find(V);
1363 SDValue Result;
1364
1365 if (It != FuncInfo.ValueMap.end()) {
1366 Register InReg = It->second;
1367
1368 RegsForValue RFV(*DAG.getContext(), DAG.getTargetLoweringInfo(),
1369 DAG.getDataLayout(), InReg, Ty,
1370 None); // This is not an ABI copy.
1371 SDValue Chain = DAG.getEntryNode();
1372 Result = RFV.getCopyFromRegs(DAG, FuncInfo, getCurSDLoc(), Chain, nullptr,
1373 V);
1374 resolveDanglingDebugInfo(V, Result);
1375 }
1376
1377 return Result;
1378}
1379
1380/// getValue - Return an SDValue for the given Value.
1381SDValue SelectionDAGBuilder::getValue(const Value *V) {
1382 // If we already have an SDValue for this value, use it. It's important
1383 // to do this first, so that we don't create a CopyFromReg if we already
1384 // have a regular SDValue.
1385 SDValue &N = NodeMap[V];
1386 if (N.getNode()) return N;
1387
1388 // If there's a virtual register allocated and initialized for this
1389 // value, use it.
1390 if (SDValue copyFromReg = getCopyFromRegs(V, V->getType()))
1391 return copyFromReg;
1392
1393 // Otherwise create a new SDValue and remember it.
1394 SDValue Val = getValueImpl(V);
1395 NodeMap[V] = Val;
1396 resolveDanglingDebugInfo(V, Val);
1397 return Val;
1398}
1399
1400/// getNonRegisterValue - Return an SDValue for the given Value, but
1401/// don't look in FuncInfo.ValueMap for a virtual register.
1402SDValue SelectionDAGBuilder::getNonRegisterValue(const Value *V) {
1403 // If we already have an SDValue for this value, use it.
1404 SDValue &N = NodeMap[V];
1405 if (N.getNode()) {
1406 if (isa<ConstantSDNode>(N) || isa<ConstantFPSDNode>(N)) {
1407 // Remove the debug location from the node as the node is about to be used
1408 // in a location which may differ from the original debug location. This
1409 // is relevant to Constant and ConstantFP nodes because they can appear
1410 // as constant expressions inside PHI nodes.
1411 N->setDebugLoc(DebugLoc());
1412 }
1413 return N;
1414 }
1415
1416 // Otherwise create a new SDValue and remember it.
1417 SDValue Val = getValueImpl(V);
1418 NodeMap[V] = Val;
1419 resolveDanglingDebugInfo(V, Val);
1420 return Val;
1421}
1422
1423/// getValueImpl - Helper function for getValue and getNonRegisterValue.
1424/// Create an SDValue for the given value.
1425SDValue SelectionDAGBuilder::getValueImpl(const Value *V) {
1426 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
1427
1428 if (const Constant *C = dyn_cast<Constant>(V)) {
1429 EVT VT = TLI.getValueType(DAG.getDataLayout(), V->getType(), true);
1430
1431 if (const ConstantInt *CI = dyn_cast<ConstantInt>(C))
1432 return DAG.getConstant(*CI, getCurSDLoc(), VT);
1433
1434 if (const GlobalValue *GV = dyn_cast<GlobalValue>(C))
1435 return DAG.getGlobalAddress(GV, getCurSDLoc(), VT);
1436
1437 if (isa<ConstantPointerNull>(C)) {
1438 unsigned AS = V->getType()->getPointerAddressSpace();
1439 return DAG.getConstant(0, getCurSDLoc(),
1440 TLI.getPointerTy(DAG.getDataLayout(), AS));
1441 }
1442
1443 if (match(C, m_VScale(DAG.getDataLayout())))
1444 return DAG.getVScale(getCurSDLoc(), VT, APInt(VT.getSizeInBits(), 1));
1445
1446 if (const ConstantFP *CFP = dyn_cast<ConstantFP>(C))
1447 return DAG.getConstantFP(*CFP, getCurSDLoc(), VT);
1448
1449 if (isa<UndefValue>(C) && !V->getType()->isAggregateType())
1450 return DAG.getUNDEF(VT);
1451
1452 if (const ConstantExpr *CE = dyn_cast<ConstantExpr>(C)) {
1453 visit(CE->getOpcode(), *CE);
1454 SDValue N1 = NodeMap[V];
1455 assert(N1.getNode() && "visit didn't populate the NodeMap!");
1456 return N1;
1457 }
1458
1459 if (isa<ConstantStruct>(C) || isa<ConstantArray>(C)) {
1460 SmallVector<SDValue, 4> Constants;
1461 for (User::const_op_iterator OI = C->op_begin(), OE = C->op_end();
1462 OI != OE; ++OI) {
1463 SDNode *Val = getValue(*OI).getNode();
1464 // If the operand is an empty aggregate, there are no values.
1465 if (!Val) continue;
1466 // Add each leaf value from the operand to the Constants list
1467 // to form a flattened list of all the values.
1468 for (unsigned i = 0, e = Val->getNumValues(); i != e; ++i)
1469 Constants.push_back(SDValue(Val, i));
1470 }
1471
1472 return DAG.getMergeValues(Constants, getCurSDLoc());
1473 }
1474
1475 if (const ConstantDataSequential *CDS =
1476 dyn_cast<ConstantDataSequential>(C)) {
1477 SmallVector<SDValue, 4> Ops;
1478 for (unsigned i = 0, e = CDS->getNumElements(); i != e; ++i) {
1479 SDNode *Val = getValue(CDS->getElementAsConstant(i)).getNode();
1480 // Add each leaf value from the operand to the Constants list
1481 // to form a flattened list of all the values.
1482 for (unsigned i = 0, e = Val->getNumValues(); i != e; ++i)
1483 Ops.push_back(SDValue(Val, i));
1484 }
1485
1486 if (isa<ArrayType>(CDS->getType()))
1487 return DAG.getMergeValues(Ops, getCurSDLoc());
1488 return NodeMap[V] = DAG.getBuildVector(VT, getCurSDLoc(), Ops);
1489 }
1490
1491 if (C->getType()->isStructTy() || C->getType()->isArrayTy()) {
1492 assert((isa<ConstantAggregateZero>(C) || isa<UndefValue>(C)) &&
1493 "Unknown struct or array constant!");
1494
1495 SmallVector<EVT, 4> ValueVTs;
1496 ComputeValueVTs(TLI, DAG.getDataLayout(), C->getType(), ValueVTs);
1497 unsigned NumElts = ValueVTs.size();
1498 if (NumElts == 0)
1499 return SDValue(); // empty struct
1500 SmallVector<SDValue, 4> Constants(NumElts);
1501 for (unsigned i = 0; i != NumElts; ++i) {
1502 EVT EltVT = ValueVTs[i];
1503 if (isa<UndefValue>(C))
1504 Constants[i] = DAG.getUNDEF(EltVT);
1505 else if (EltVT.isFloatingPoint())
1506 Constants[i] = DAG.getConstantFP(0, getCurSDLoc(), EltVT);
1507 else
1508 Constants[i] = DAG.getConstant(0, getCurSDLoc(), EltVT);
1509 }
1510
1511 return DAG.getMergeValues(Constants, getCurSDLoc());
1512 }
1513
1514 if (const BlockAddress *BA = dyn_cast<BlockAddress>(C))
1515 return DAG.getBlockAddress(BA, VT);
1516
1517 if (const auto *Equiv = dyn_cast<DSOLocalEquivalent>(C))
1518 return getValue(Equiv->getGlobalValue());
1519
1520 VectorType *VecTy = cast<VectorType>(V->getType());
1521
1522 // Now that we know the number and type of the elements, get that number of
1523 // elements into the Ops array based on what kind of constant it is.
1524 if (const ConstantVector *CV = dyn_cast<ConstantVector>(C)) {
1525 SmallVector<SDValue, 16> Ops;
1526 unsigned NumElements = cast<FixedVectorType>(VecTy)->getNumElements();
1527 for (unsigned i = 0; i != NumElements; ++i)
1528 Ops.push_back(getValue(CV->getOperand(i)));
1529
1530 return NodeMap[V] = DAG.getBuildVector(VT, getCurSDLoc(), Ops);
1531 } else if (isa<ConstantAggregateZero>(C)) {
1532 EVT EltVT =
1533 TLI.getValueType(DAG.getDataLayout(), VecTy->getElementType());
1534
1535 SDValue Op;
1536 if (EltVT.isFloatingPoint())
1537 Op = DAG.getConstantFP(0, getCurSDLoc(), EltVT);
1538 else
1539 Op = DAG.getConstant(0, getCurSDLoc(), EltVT);
1540
1541 if (isa<ScalableVectorType>(VecTy))
1542 return NodeMap[V] = DAG.getSplatVector(VT, getCurSDLoc(), Op);
1543 else {
1544 SmallVector<SDValue, 16> Ops;
1545 Ops.assign(cast<FixedVectorType>(VecTy)->getNumElements(), Op);
1546 return NodeMap[V] = DAG.getBuildVector(VT, getCurSDLoc(), Ops);
1547 }
1548 }
1549 llvm_unreachable("Unknown vector constant");
1550 }
1551
1552 // If this is a static alloca, generate it as the frameindex instead of
1553 // computation.
1554 if (const AllocaInst *AI = dyn_cast<AllocaInst>(V)) {
1555 DenseMap<const AllocaInst*, int>::iterator SI =
1556 FuncInfo.StaticAllocaMap.find(AI);
1557 if (SI != FuncInfo.StaticAllocaMap.end())
1558 return DAG.getFrameIndex(SI->second,
1559 TLI.getFrameIndexTy(DAG.getDataLayout()));
1560 }
1561
1562 // If this is an instruction which fast-isel has deferred, select it now.
1563 if (const Instruction *Inst = dyn_cast<Instruction>(V)) {
1564 unsigned InReg = FuncInfo.InitializeRegForValue(Inst);
1565
1566 RegsForValue RFV(*DAG.getContext(), TLI, DAG.getDataLayout(), InReg,
1567 Inst->getType(), None);
1568 SDValue Chain = DAG.getEntryNode();
1569 return RFV.getCopyFromRegs(DAG, FuncInfo, getCurSDLoc(), Chain, nullptr, V);
1570 }
1571
1572 if (const MetadataAsValue *MD = dyn_cast<MetadataAsValue>(V)) {
1573 return DAG.getMDNode(cast<MDNode>(MD->getMetadata()));
1574 }
1575 llvm_unreachable("Can't get register for value!");
1576}
1577
1578void SelectionDAGBuilder::visitCatchPad(const CatchPadInst &I) {
1579 auto Pers = classifyEHPersonality(FuncInfo.Fn->getPersonalityFn());
1580 bool IsMSVCCXX = Pers == EHPersonality::MSVC_CXX;
1581 bool IsCoreCLR = Pers == EHPersonality::CoreCLR;
1582 bool IsSEH = isAsynchronousEHPersonality(Pers);
1583 MachineBasicBlock *CatchPadMBB = FuncInfo.MBB;
1584 if (!IsSEH)
1585 CatchPadMBB->setIsEHScopeEntry();
1586 // In MSVC C++ and CoreCLR, catchblocks are funclets and need prologues.
1587 if (IsMSVCCXX || IsCoreCLR)
1588 CatchPadMBB->setIsEHFuncletEntry();
1589}
1590
1591void SelectionDAGBuilder::visitCatchRet(const CatchReturnInst &I) {
1592 // Update machine-CFG edge.
1593 MachineBasicBlock *TargetMBB = FuncInfo.MBBMap[I.getSuccessor()];
1594 FuncInfo.MBB->addSuccessor(TargetMBB);
1595
1596 auto Pers = classifyEHPersonality(FuncInfo.Fn->getPersonalityFn());
1597 bool IsSEH = isAsynchronousEHPersonality(Pers);
1598 if (IsSEH) {
1599 // If this is not a fall-through branch or optimizations are switched off,
1600 // emit the branch.
1601 if (TargetMBB != NextBlock(FuncInfo.MBB) ||
1602 TM.getOptLevel() == CodeGenOpt::None)
1603 DAG.setRoot(DAG.getNode(ISD::BR, getCurSDLoc(), MVT::Other,
1604 getControlRoot(), DAG.getBasicBlock(TargetMBB)));
1605 return;
1606 }
1607
1608 // Figure out the funclet membership for the catchret's successor.
1609 // This will be used by the FuncletLayout pass to determine how to order the
1610 // BB's.
1611 // A 'catchret' returns to the outer scope's color.
1612 Value *ParentPad = I.getCatchSwitchParentPad();
1613 const BasicBlock *SuccessorColor;
1614 if (isa<ConstantTokenNone>(ParentPad))
1615 SuccessorColor = &FuncInfo.Fn->getEntryBlock();
1616 else
1617 SuccessorColor = cast<Instruction>(ParentPad)->getParent();
1618 assert(SuccessorColor && "No parent funclet for catchret!");
1619 MachineBasicBlock *SuccessorColorMBB = FuncInfo.MBBMap[SuccessorColor];
1620 assert(SuccessorColorMBB && "No MBB for SuccessorColor!");
1621
1622 // Create the terminator node.
1623 SDValue Ret = DAG.getNode(ISD::CATCHRET, getCurSDLoc(), MVT::Other,
1624 getControlRoot(), DAG.getBasicBlock(TargetMBB),
1625 DAG.getBasicBlock(SuccessorColorMBB));
1626 DAG.setRoot(Ret);
1627}
1628
1629void SelectionDAGBuilder::visitCleanupPad(const CleanupPadInst &CPI) {
1630 // Don't emit any special code for the cleanuppad instruction. It just marks
1631 // the start of an EH scope/funclet.
1632 FuncInfo.MBB->setIsEHScopeEntry();
1633 auto Pers = classifyEHPersonality(FuncInfo.Fn->getPersonalityFn());
1634 if (Pers != EHPersonality::Wasm_CXX) {
1635 FuncInfo.MBB->setIsEHFuncletEntry();
1636 FuncInfo.MBB->setIsCleanupFuncletEntry();
1637 }
1638}
1639
1640// In wasm EH, even though a catchpad may not catch an exception if a tag does
1641// not match, it is OK to add only the first unwind destination catchpad to the
1642// successors, because there will be at least one invoke instruction within the
1643// catch scope that points to the next unwind destination, if one exists, so
1644// CFGSort cannot mess up with BB sorting order.
1645// (All catchpads with 'catch (type)' clauses have a 'llvm.rethrow' intrinsic
1646// call within them, and catchpads only consisting of 'catch (...)' have a
1647// '__cxa_end_catch' call within them, both of which generate invokes in case
1648// the next unwind destination exists, i.e., the next unwind destination is not
1649// the caller.)
1650//
1651// Having at most one EH pad successor is also simpler and helps later
1652// transformations.
1653//
1654// For example,
1655// current:
1656// invoke void @foo to ... unwind label %catch.dispatch
1657// catch.dispatch:
1658// %0 = catchswitch within ... [label %catch.start] unwind label %next
1659// catch.start:
1660// ...
1661// ... in this BB or some other child BB dominated by this BB there will be an
1662// invoke that points to 'next' BB as an unwind destination
1663//
1664// next: ; We don't need to add this to 'current' BB's successor
1665// ...
1666static void findWasmUnwindDestinations(
1667 FunctionLoweringInfo &FuncInfo, const BasicBlock *EHPadBB,
1668 BranchProbability Prob,
1669 SmallVectorImpl<std::pair<MachineBasicBlock *, BranchProbability>>
1670 &UnwindDests) {
1671 while (EHPadBB) {
1672 const Instruction *Pad = EHPadBB->getFirstNonPHI();
1673 if (isa<CleanupPadInst>(Pad)) {
1674 // Stop on cleanup pads.
1675 UnwindDests.emplace_back(FuncInfo.MBBMap[EHPadBB], Prob);
1676 UnwindDests.back().first->setIsEHScopeEntry();
1677 break;
1678 } else if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(Pad)) {
1679 // Add the catchpad handlers to the possible destinations. We don't
1680 // continue to the unwind destination of the catchswitch for wasm.
1681 for (const BasicBlock *CatchPadBB : CatchSwitch->handlers()) {
1682 UnwindDests.emplace_back(FuncInfo.MBBMap[CatchPadBB], Prob);
1683 UnwindDests.back().first->setIsEHScopeEntry();
1684 }
1685 break;
1686 } else {
1687 continue;
1688 }
1689 }
1690}
1691
1692/// When an invoke or a cleanupret unwinds to the next EH pad, there are
1693/// many places it could ultimately go. In the IR, we have a single unwind
1694/// destination, but in the machine CFG, we enumerate all the possible blocks.
1695/// This function skips over imaginary basic blocks that hold catchswitch
1696/// instructions, and finds all the "real" machine
1697/// basic block destinations. As those destinations may not be successors of
1698/// EHPadBB, here we also calculate the edge probability to those destinations.
1699/// The passed-in Prob is the edge probability to EHPadBB.
1700static void findUnwindDestinations(
1701 FunctionLoweringInfo &FuncInfo, const BasicBlock *EHPadBB,
1702 BranchProbability Prob,
1703 SmallVectorImpl<std::pair<MachineBasicBlock *, BranchProbability>>
1704 &UnwindDests) {
1705 EHPersonality Personality =
1706 classifyEHPersonality(FuncInfo.Fn->getPersonalityFn());
1707 bool IsMSVCCXX = Personality == EHPersonality::MSVC_CXX;
1708 bool IsCoreCLR = Personality == EHPersonality::CoreCLR;
1709 bool IsWasmCXX = Personality == EHPersonality::Wasm_CXX;
1710 bool IsSEH = isAsynchronousEHPersonality(Personality);
1711
1712 if (IsWasmCXX) {
1713 findWasmUnwindDestinations(FuncInfo, EHPadBB, Prob, UnwindDests);
1714 assert(UnwindDests.size() <= 1 &&
1715 "There should be at most one unwind destination for wasm");
1716 return;
1717 }
1718
1719 while (EHPadBB) {
1720 const Instruction *Pad = EHPadBB->getFirstNonPHI();
1721 BasicBlock *NewEHPadBB = nullptr;
1722 if (isa<LandingPadInst>(Pad)) {
1723 // Stop on landingpads. They are not funclets.
1724 UnwindDests.emplace_back(FuncInfo.MBBMap[EHPadBB], Prob);
1725 break;
1726 } else if (isa<CleanupPadInst>(Pad)) {
1727 // Stop on cleanup pads. Cleanups are always funclet entries for all known
1728 // personalities.
1729 UnwindDests.emplace_back(FuncInfo.MBBMap[EHPadBB], Prob);
1730 UnwindDests.back().first->setIsEHScopeEntry();
1731 UnwindDests.back().first->setIsEHFuncletEntry();
1732 break;
1733 } else if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(Pad)) {
1734 // Add the catchpad handlers to the possible destinations.
1735 for (const BasicBlock *CatchPadBB : CatchSwitch->handlers()) {
1736 UnwindDests.emplace_back(FuncInfo.MBBMap[CatchPadBB], Prob);
1737 // For MSVC++ and the CLR, catchblocks are funclets and need prologues.
1738 if (IsMSVCCXX || IsCoreCLR)
1739 UnwindDests.back().first->setIsEHFuncletEntry();
1740 if (!IsSEH)
1741 UnwindDests.back().first->setIsEHScopeEntry();
1742 }
1743 NewEHPadBB = CatchSwitch->getUnwindDest();
1744 } else {
1745 continue;
1746 }
1747
1748 BranchProbabilityInfo *BPI = FuncInfo.BPI;
1749 if (BPI && NewEHPadBB)
1750 Prob *= BPI->getEdgeProbability(EHPadBB, NewEHPadBB);
1751 EHPadBB = NewEHPadBB;
1752 }
1753}
1754
1755void SelectionDAGBuilder::visitCleanupRet(const CleanupReturnInst &I) {
1756 // Update successor info.
1757 SmallVector<std::pair<MachineBasicBlock *, BranchProbability>, 1> UnwindDests;
1758 auto UnwindDest = I.getUnwindDest();
1759 BranchProbabilityInfo *BPI = FuncInfo.BPI;
1760 BranchProbability UnwindDestProb =
1761 (BPI && UnwindDest)
1762 ? BPI->getEdgeProbability(FuncInfo.MBB->getBasicBlock(), UnwindDest)
1763 : BranchProbability::getZero();
1764 findUnwindDestinations(FuncInfo, UnwindDest, UnwindDestProb, UnwindDests);
1765 for (auto &UnwindDest : UnwindDests) {
1766 UnwindDest.first->setIsEHPad();
1767 addSuccessorWithProb(FuncInfo.MBB, UnwindDest.first, UnwindDest.second);
1768 }
1769 FuncInfo.MBB->normalizeSuccProbs();
1770
1771 // Create the terminator node.
1772 SDValue Ret =
1773 DAG.getNode(ISD::CLEANUPRET, getCurSDLoc(), MVT::Other, getControlRoot());
1774 DAG.setRoot(Ret);
1775}
1776
1777void SelectionDAGBuilder::visitCatchSwitch(const CatchSwitchInst &CSI) {
1778 report_fatal_error("visitCatchSwitch not yet implemented!");
1779}
1780
1781void SelectionDAGBuilder::visitRet(const ReturnInst &I) {
1782 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
1783 auto &DL = DAG.getDataLayout();
1784 SDValue Chain = getControlRoot();
1785 SmallVector<ISD::OutputArg, 8> Outs;
1786 SmallVector<SDValue, 8> OutVals;
1787
1788 // Calls to @llvm.experimental.deoptimize don't generate a return value, so
1789 // lower
1790 //
1791 // %val = call <ty> @llvm.experimental.deoptimize()
1792 // ret <ty> %val
1793 //
1794 // differently.
1795 if (I.getParent()->getTerminatingDeoptimizeCall()) {
1796 LowerDeoptimizingReturn();
1797 return;
1798 }
1799
1800 if (!FuncInfo.CanLowerReturn) {
1801 unsigned DemoteReg = FuncInfo.DemoteRegister;
1802 const Function *F = I.getParent()->getParent();
1803
1804 // Emit a store of the return value through the virtual register.
1805 // Leave Outs empty so that LowerReturn won't try to load return
1806 // registers the usual way.
1807 SmallVector<EVT, 1> PtrValueVTs;
1808 ComputeValueVTs(TLI, DL,
1809 F->getReturnType()->getPointerTo(
1810 DAG.getDataLayout().getAllocaAddrSpace()),
1811 PtrValueVTs);
1812
1813 SDValue RetPtr = DAG.getCopyFromReg(DAG.getEntryNode(), getCurSDLoc(),
1814 DemoteReg, PtrValueVTs[0]);
1815 SDValue RetOp = getValue(I.getOperand(0));
1816
1817 SmallVector<EVT, 4> ValueVTs, MemVTs;
1818 SmallVector<uint64_t, 4> Offsets;
1819 ComputeValueVTs(TLI, DL, I.getOperand(0)->getType(), ValueVTs, &MemVTs,
1820 &Offsets);
1821 unsigned NumValues = ValueVTs.size();
1822
1823 SmallVector<SDValue, 4> Chains(NumValues);
1824 Align BaseAlign = DL.getPrefTypeAlign(I.getOperand(0)->getType());
1825 for (unsigned i = 0; i != NumValues; ++i) {
1826 // An aggregate return value cannot wrap around the address space, so
1827 // offsets to its parts don't wrap either.
1828 SDValue Ptr = DAG.getObjectPtrOffset(getCurSDLoc(), RetPtr,
1829 TypeSize::Fixed(Offsets[i]));
1830
1831 SDValue Val = RetOp.getValue(RetOp.getResNo() + i);
1832 if (MemVTs[i] != ValueVTs[i])
1833 Val = DAG.getPtrExtOrTrunc(Val, getCurSDLoc(), MemVTs[i]);
1834 Chains[i] = DAG.getStore(
1835 Chain, getCurSDLoc(), Val,
1836 // FIXME: better loc info would be nice.
1837 Ptr, MachinePointerInfo::getUnknownStack(DAG.getMachineFunction()),
1838 commonAlignment(BaseAlign, Offsets[i]));
1839 }
1840
1841 Chain = DAG.getNode(ISD::TokenFactor, getCurSDLoc(),
1842 MVT::Other, Chains);
1843 } else if (I.getNumOperands() != 0) {
1844 SmallVector<EVT, 4> ValueVTs;
1845 ComputeValueVTs(TLI, DL, I.getOperand(0)->getType(), ValueVTs);
1846 unsigned NumValues = ValueVTs.size();
1847 if (NumValues) {
1848 SDValue RetOp = getValue(I.getOperand(0));
1849
1850 const Function *F = I.getParent()->getParent();
1851
1852 bool NeedsRegBlock = TLI.functionArgumentNeedsConsecutiveRegisters(
1853 I.getOperand(0)->getType(), F->getCallingConv(),
1854 /*IsVarArg*/ false);
1855
1856 ISD::NodeType ExtendKind = ISD::ANY_EXTEND;
1857 if (F->getAttributes().hasAttribute(AttributeList::ReturnIndex,
1858 Attribute::SExt))
1859 ExtendKind = ISD::SIGN_EXTEND;
1860 else if (F->getAttributes().hasAttribute(AttributeList::ReturnIndex,
1861 Attribute::ZExt))
1862 ExtendKind = ISD::ZERO_EXTEND;
1863
1864 LLVMContext &Context = F->getContext();
1865 bool RetInReg = F->getAttributes().hasAttribute(
1866 AttributeList::ReturnIndex, Attribute::InReg);
1867
1868 for (unsigned j = 0; j != NumValues; ++j) {
1869 EVT VT = ValueVTs[j];
1870
1871 if (ExtendKind != ISD::ANY_EXTEND && VT.isInteger())
1872 VT = TLI.getTypeForExtReturn(Context, VT, ExtendKind);
1873
1874 CallingConv::ID CC = F->getCallingConv();
1875
1876 unsigned NumParts = TLI.getNumRegistersForCallingConv(Context, CC, VT);
1877 MVT PartVT = TLI.getRegisterTypeForCallingConv(Context, CC, VT);
1878 SmallVector<SDValue, 4> Parts(NumParts);
1879 getCopyToParts(DAG, getCurSDLoc(),
1880 SDValue(RetOp.getNode(), RetOp.getResNo() + j),
1881 &Parts[0], NumParts, PartVT, &I, CC, ExtendKind);
1882
1883 // 'inreg' on function refers to return value
1884 ISD::ArgFlagsTy Flags = ISD::ArgFlagsTy();
1885 if (RetInReg)
1886 Flags.setInReg();
1887
1888 if (I.getOperand(0)->getType()->isPointerTy()) {
1889 Flags.setPointer();
1890 Flags.setPointerAddrSpace(
1891 cast<PointerType>(I.getOperand(0)->getType())->getAddressSpace());
1892 }
1893
1894 if (NeedsRegBlock) {
1895 Flags.setInConsecutiveRegs();
1896 if (j == NumValues - 1)
1897 Flags.setInConsecutiveRegsLast();
1898 }
1899
1900 // Propagate extension type if any
1901 if (ExtendKind == ISD::SIGN_EXTEND)
1902 Flags.setSExt();
1903 else if (ExtendKind == ISD::ZERO_EXTEND)
1904 Flags.setZExt();
1905
1906 for (unsigned i = 0; i < NumParts; ++i) {
1907 Outs.push_back(ISD::OutputArg(Flags, Parts[i].getValueType(),
1908 VT, /*isfixed=*/true, 0, 0));
1909 OutVals.push_back(Parts[i]);
1910 }
1911 }
1912 }
1913 }
1914
1915 // Push in swifterror virtual register as the last element of Outs. This makes
1916 // sure swifterror virtual register will be returned in the swifterror
1917 // physical register.
1918 const Function *F = I.getParent()->getParent();
1919 if (TLI.supportSwiftError() &&
1920 F->getAttributes().hasAttrSomewhere(Attribute::SwiftError)) {
1921 assert(SwiftError.getFunctionArg() && "Need a swift error argument");
1922 ISD::ArgFlagsTy Flags = ISD::ArgFlagsTy();
1923 Flags.setSwiftError();
1924 Outs.push_back(ISD::OutputArg(Flags, EVT(TLI.getPointerTy(DL)) /*vt*/,
1925 EVT(TLI.getPointerTy(DL)) /*argvt*/,
1926 true /*isfixed*/, 1 /*origidx*/,
1927 0 /*partOffs*/));
1928 // Create SDNode for the swifterror virtual register.
1929 OutVals.push_back(
1930 DAG.getRegister(SwiftError.getOrCreateVRegUseAt(
1931 &I, FuncInfo.MBB, SwiftError.getFunctionArg()),
1932 EVT(TLI.getPointerTy(DL))));
1933 }
1934
1935 bool isVarArg = DAG.getMachineFunction().getFunction().isVarArg();
1936 CallingConv::ID CallConv =
1937 DAG.getMachineFunction().getFunction().getCallingConv();
1938 Chain = DAG.getTargetLoweringInfo().LowerReturn(
1939 Chain, CallConv, isVarArg, Outs, OutVals, getCurSDLoc(), DAG);
1940
1941 // Verify that the target's LowerReturn behaved as expected.
1942 assert(Chain.getNode() && Chain.getValueType() == MVT::Other &&
1943 "LowerReturn didn't return a valid chain!");
1944
1945 // Update the DAG with the new chain value resulting from return lowering.
1946 DAG.setRoot(Chain);
1947}
1948
1949/// CopyToExportRegsIfNeeded - If the given value has virtual registers
1950/// created for it, emit nodes to copy the value into the virtual
1951/// registers.
1952void SelectionDAGBuilder::CopyToExportRegsIfNeeded(const Value *V) {
1953 // Skip empty types
1954 if (V->getType()->isEmptyTy())
1955 return;
1956
1957 DenseMap<const Value *, Register>::iterator VMI = FuncInfo.ValueMap.find(V);
1958 if (VMI != FuncInfo.ValueMap.end()) {
1959 assert(!V->use_empty() && "Unused value assigned virtual registers!");
1960 CopyValueToVirtualRegister(V, VMI->second);
1961 }
1962}
1963
1964/// ExportFromCurrentBlock - If this condition isn't known to be exported from
1965/// the current basic block, add it to ValueMap now so that we'll get a
1966/// CopyTo/FromReg.
1967void SelectionDAGBuilder::ExportFromCurrentBlock(const Value *V) {
1968 // No need to export constants.
1969 if (!isa<Instruction>(V) && !isa<Argument>(V)) return;
1970
1971 // Already exported?
1972 if (FuncInfo.isExportedInst(V)) return;
1973
1974 unsigned Reg = FuncInfo.InitializeRegForValue(V);
1975 CopyValueToVirtualRegister(V, Reg);
1976}
1977
1978bool SelectionDAGBuilder::isExportableFromCurrentBlock(const Value *V,
1979 const BasicBlock *FromBB) {
1980 // The operands of the setcc have to be in this block. We don't know
1981 // how to export them from some other block.
1982 if (const Instruction *VI = dyn_cast<Instruction>(V)) {
1983 // Can export from current BB.
1984 if (VI->getParent() == FromBB)
1985 return true;
1986
1987 // Is already exported, noop.
1988 return FuncInfo.isExportedInst(V);
1989 }
1990
1991 // If this is an argument, we can export it if the BB is the entry block or
1992 // if it is already exported.
1993 if (isa<Argument>(V)) {
1994 if (FromBB == &FromBB->getParent()->getEntryBlock())
1995 return true;
1996
1997 // Otherwise, can only export this if it is already exported.
1998 return FuncInfo.isExportedInst(V);
1999 }
2000
2001 // Otherwise, constants can always be exported.
2002 return true;
2003}
2004
2005/// Return branch probability calculated by BranchProbabilityInfo for IR blocks.
2006BranchProbability
2007SelectionDAGBuilder::getEdgeProbability(const MachineBasicBlock *Src,
2008 const MachineBasicBlock *Dst) const {
2009 BranchProbabilityInfo *BPI = FuncInfo.BPI;
2010 const BasicBlock *SrcBB = Src->getBasicBlock();
2011 const BasicBlock *DstBB = Dst->getBasicBlock();
2012 if (!BPI) {
2013 // If BPI is not available, set the default probability as 1 / N, where N is
2014 // the number of successors.
2015 auto SuccSize = std::max<uint32_t>(succ_size(SrcBB), 1);
2016 return BranchProbability(1, SuccSize);
2017 }
2018 return BPI->getEdgeProbability(SrcBB, DstBB);
2019}
2020
2021void SelectionDAGBuilder::addSuccessorWithProb(MachineBasicBlock *Src,
2022 MachineBasicBlock *Dst,
2023 BranchProbability Prob) {
2024 if (!FuncInfo.BPI)
2025 Src->addSuccessorWithoutProb(Dst);
2026 else {
2027 if (Prob.isUnknown())
2028 Prob = getEdgeProbability(Src, Dst);
2029 Src->addSuccessor(Dst, Prob);
2030 }
2031}
2032
2033static bool InBlock(const Value *V, const BasicBlock *BB) {
2034 if (const Instruction *I = dyn_cast<Instruction>(V))
2035 return I->getParent() == BB;
2036 return true;
2037}
2038
2039/// EmitBranchForMergedCondition - Helper method for FindMergedConditions.
2040/// This function emits a branch and is used at the leaves of an OR or an
2041/// AND operator tree.
2042void
2043SelectionDAGBuilder::EmitBranchForMergedCondition(const Value *Cond,
2044 MachineBasicBlock *TBB,
2045 MachineBasicBlock *FBB,
2046 MachineBasicBlock *CurBB,
2047 MachineBasicBlock *SwitchBB,
2048 BranchProbability TProb,
2049 BranchProbability FProb,
2050 bool InvertCond) {
2051 const BasicBlock *BB = CurBB->getBasicBlock();
2052
2053 // If the leaf of the tree is a comparison, merge the condition into
2054 // the caseblock.
2055 if (const CmpInst *BOp = dyn_cast<CmpInst>(Cond)) {
2056 // The operands of the cmp have to be in this block. We don't know
2057 // how to export them from some other block. If this is the first block
2058 // of the sequence, no exporting is needed.
2059 if (CurBB == SwitchBB ||
2060 (isExportableFromCurrentBlock(BOp->getOperand(0), BB) &&
2061 isExportableFromCurrentBlock(BOp->getOperand(1), BB))) {
2062 ISD::CondCode Condition;
2063 if (const ICmpInst *IC = dyn_cast<ICmpInst>(Cond)) {
2064 ICmpInst::Predicate Pred =
2065 InvertCond ? IC->getInversePredicate() : IC->getPredicate();
2066 Condition = getICmpCondCode(Pred);
2067 } else {
2068 const FCmpInst *FC = cast<FCmpInst>(Cond);
2069 FCmpInst::Predicate Pred =
2070 InvertCond ? FC->getInversePredicate() : FC->getPredicate();
2071 Condition = getFCmpCondCode(Pred);
2072 if (TM.Options.NoNaNsFPMath)
2073 Condition = getFCmpCodeWithoutNaN(Condition);
2074 }
2075
2076 CaseBlock CB(Condition, BOp->getOperand(0), BOp->getOperand(1), nullptr,
2077 TBB, FBB, CurBB, getCurSDLoc(), TProb, FProb);
2078 SL->SwitchCases.push_back(CB);
2079 return;
2080 }
2081 }
2082
2083 // Create a CaseBlock record representing this branch.
2084 ISD::CondCode Opc = InvertCond ? ISD::SETNE : ISD::SETEQ;
2085 CaseBlock CB(Opc, Cond, ConstantInt::getTrue(*DAG.getContext()),
2086 nullptr, TBB, FBB, CurBB, getCurSDLoc(), TProb, FProb);
2087 SL->SwitchCases.push_back(CB);
2088}
2089
2090void SelectionDAGBuilder::FindMergedConditions(const Value *Cond,
2091 MachineBasicBlock *TBB,
2092 MachineBasicBlock *FBB,
2093 MachineBasicBlock *CurBB,
2094 MachineBasicBlock *SwitchBB,
2095 Instruction::BinaryOps Opc,
2096 BranchProbability TProb,
2097 BranchProbability FProb,
2098 bool InvertCond) {
2099 // Skip over not part of the tree and remember to invert op and operands at
2100 // next level.
2101 Value *NotCond;
2102 if (match(Cond, m_OneUse(m_Not(m_Value(NotCond)))) &&
2103 InBlock(NotCond, CurBB->getBasicBlock())) {
2104 FindMergedConditions(NotCond, TBB, FBB, CurBB, SwitchBB, Opc, TProb, FProb,
2105 !InvertCond);
2106 return;
2107 }
2108
2109 const Instruction *BOp = dyn_cast<Instruction>(Cond);
2110 const Value *BOpOp0, *BOpOp1;
2111 // Compute the effective opcode for Cond, taking into account whether it needs
2112 // to be inverted, e.g.
2113 // and (not (or A, B)), C
2114 // gets lowered as
2115 // and (and (not A, not B), C)
2116 Instruction::BinaryOps BOpc = (Instruction::BinaryOps)0;
2117 if (BOp) {
2118 BOpc = match(BOp, m_LogicalAnd(m_Value(BOpOp0), m_Value(BOpOp1)))
2119 ? Instruction::And
2120 : (match(BOp, m_LogicalOr(m_Value(BOpOp0), m_Value(BOpOp1)))
2121 ? Instruction::Or
2122 : (Instruction::BinaryOps)0);
2123 if (InvertCond) {
2124 if (BOpc == Instruction::And)
2125 BOpc = Instruction::Or;
2126 else if (BOpc == Instruction::Or)
2127 BOpc = Instruction::And;
2128 }
2129 }
2130
2131 // If this node is not part of the or/and tree, emit it as a branch.
2132 // Note that all nodes in the tree should have same opcode.
2133 bool BOpIsInOrAndTree = BOpc && BOpc == Opc && BOp->hasOneUse();
2134 if (!BOpIsInOrAndTree || BOp->getParent() != CurBB->getBasicBlock() ||
2135 !InBlock(BOpOp0, CurBB->getBasicBlock()) ||
2136 !InBlock(BOpOp1, CurBB->getBasicBlock())) {
2137 EmitBranchForMergedCondition(Cond, TBB, FBB, CurBB, SwitchBB,
2138 TProb, FProb, InvertCond);
2139 return;
2140 }
2141
2142 // Create TmpBB after CurBB.
2143 MachineFunction::iterator BBI(CurBB);
2144 MachineFunction &MF = DAG.getMachineFunction();
2145 MachineBasicBlock *TmpBB = MF.CreateMachineBasicBlock(CurBB->getBasicBlock());
2146 CurBB->getParent()->insert(++BBI, TmpBB);
2147
2148 if (Opc == Instruction::Or) {
2149 // Codegen X | Y as:
2150 // BB1:
2151 // jmp_if_X TBB
2152 // jmp TmpBB
2153 // TmpBB:
2154 // jmp_if_Y TBB
2155 // jmp FBB
2156 //
2157
2158 // We have flexibility in setting Prob for BB1 and Prob for TmpBB.
2159 // The requirement is that
2160 // TrueProb for BB1 + (FalseProb for BB1 * TrueProb for TmpBB)
2161 // = TrueProb for original BB.
2162 // Assuming the original probabilities are A and B, one choice is to set
2163 // BB1's probabilities to A/2 and A/2+B, and set TmpBB's probabilities to
2164 // A/(1+B) and 2B/(1+B). This choice assumes that
2165 // TrueProb for BB1 == FalseProb for BB1 * TrueProb for TmpBB.
2166 // Another choice is to assume TrueProb for BB1 equals to TrueProb for
2167 // TmpBB, but the math is more complicated.
2168
2169 auto NewTrueProb = TProb / 2;
2170 auto NewFalseProb = TProb / 2 + FProb;
2171 // Emit the LHS condition.
2172 FindMergedConditions(BOpOp0, TBB, TmpBB, CurBB, SwitchBB, Opc, NewTrueProb,
2173 NewFalseProb, InvertCond);
2174
2175 // Normalize A/2 and B to get A/(1+B) and 2B/(1+B).
2176 SmallVector<BranchProbability, 2> Probs{TProb / 2, FProb};
2177 BranchProbability::normalizeProbabilities(Probs.begin(), Probs.end());
2178 // Emit the RHS condition into TmpBB.
2179 FindMergedConditions(BOpOp1, TBB, FBB, TmpBB, SwitchBB, Opc, Probs[0],
2180 Probs[1], InvertCond);
2181 } else {
2182 assert(Opc == Instruction::And && "Unknown merge op!");
2183 // Codegen X & Y as:
2184 // BB1:
2185 // jmp_if_X TmpBB
2186 // jmp FBB
2187 // TmpBB:
2188 // jmp_if_Y TBB
2189 // jmp FBB
2190 //
2191 // This requires creation of TmpBB after CurBB.
2192
2193 // We have flexibility in setting Prob for BB1 and Prob for TmpBB.
2194 // The requirement is that
2195 // FalseProb for BB1 + (TrueProb for BB1 * FalseProb for TmpBB)
2196 // = FalseProb for original BB.
2197 // Assuming the original probabilities are A and B, one choice is to set
2198 // BB1's probabilities to A+B/2 and B/2, and set TmpBB's probabilities to
2199 // 2A/(1+A) and B/(1+A). This choice assumes that FalseProb for BB1 ==
2200 // TrueProb for BB1 * FalseProb for TmpBB.
2201
2202 auto NewTrueProb = TProb + FProb / 2;
2203 auto NewFalseProb = FProb / 2;
2204 // Emit the LHS condition.
2205 FindMergedConditions(BOpOp0, TmpBB, FBB, CurBB, SwitchBB, Opc, NewTrueProb,
2206 NewFalseProb, InvertCond);
2207
2208 // Normalize A and B/2 to get 2A/(1+A) and B/(1+A).
2209 SmallVector<BranchProbability, 2> Probs{TProb, FProb / 2};
2210 BranchProbability::normalizeProbabilities(Probs.begin(), Probs.end());
2211 // Emit the RHS condition into TmpBB.
2212 FindMergedConditions(BOpOp1, TBB, FBB, TmpBB, SwitchBB, Opc, Probs[0],
2213 Probs[1], InvertCond);
2214 }
2215}
2216
2217/// If the set of cases should be emitted as a series of branches, return true.
2218/// If we should emit this as a bunch of and/or'd together conditions, return
2219/// false.
2220bool
2221SelectionDAGBuilder::ShouldEmitAsBranches(const std::vector<CaseBlock> &Cases) {
2222 if (Cases.size() != 2) return true;
2223
2224 // If this is two comparisons of the same values or'd or and'd together, they
2225 // will get folded into a single comparison, so don't emit two blocks.
2226 if ((Cases[0].CmpLHS == Cases[1].CmpLHS &&
2227 Cases[0].CmpRHS == Cases[1].CmpRHS) ||
2228 (Cases[0].CmpRHS == Cases[1].CmpLHS &&
2229 Cases[0].CmpLHS == Cases[1].CmpRHS)) {
2230 return false;
2231 }
2232
2233 // Handle: (X != null) | (Y != null) --> (X|Y) != 0
2234 // Handle: (X == null) & (Y == null) --> (X|Y) == 0
2235 if (Cases[0].CmpRHS == Cases[1].CmpRHS &&
2236 Cases[0].CC == Cases[1].CC &&
2237 isa<Constant>(Cases[0].CmpRHS) &&
2238 cast<Constant>(Cases[0].CmpRHS)->isNullValue()) {
2239 if (Cases[0].CC == ISD::SETEQ && Cases[0].TrueBB == Cases[1].ThisBB)
2240 return false;
2241 if (Cases[0].CC == ISD::SETNE && Cases[0].FalseBB == Cases[1].ThisBB)
2242 return false;
2243 }
2244
2245 return true;
2246}
2247
2248void SelectionDAGBuilder::visitBr(const BranchInst &I) {
2249 MachineBasicBlock *BrMBB = FuncInfo.MBB;
2250
2251 // Update machine-CFG edges.
2252 MachineBasicBlock *Succ0MBB = FuncInfo.MBBMap[I.getSuccessor(0)];
2253
2254 if (I.isUnconditional()) {
2255 // Update machine-CFG edges.
2256 BrMBB->addSuccessor(Succ0MBB);
2257
2258 // If this is not a fall-through branch or optimizations are switched off,
2259 // emit the branch.
2260 if (Succ0MBB != NextBlock(BrMBB) || TM.getOptLevel() == CodeGenOpt::None)
2261 DAG.setRoot(DAG.getNode(ISD::BR, getCurSDLoc(),
2262 MVT::Other, getControlRoot(),
2263 DAG.getBasicBlock(Succ0MBB)));
2264
2265 return;
2266 }
2267
2268 // If this condition is one of the special cases we handle, do special stuff
2269 // now.
2270 const Value *CondVal = I.getCondition();
2271 MachineBasicBlock *Succ1MBB = FuncInfo.MBBMap[I.getSuccessor(1)];
2272
2273 // If this is a series of conditions that are or'd or and'd together, emit
2274 // this as a sequence of branches instead of setcc's with and/or operations.
2275 // As long as jumps are not expensive (exceptions for multi-use logic ops,
2276 // unpredictable branches, and vector extracts because those jumps are likely
2277 // expensive for any target), this should improve performance.
2278 // For example, instead of something like:
2279 // cmp A, B
2280 // C = seteq
2281 // cmp D, E
2282 // F = setle
2283 // or C, F
2284 // jnz foo
2285 // Emit:
2286 // cmp A, B
2287 // je foo
2288 // cmp D, E
2289 // jle foo
2290 const Instruction *BOp = dyn_cast<Instruction>(CondVal);
2291 if (!DAG.getTargetLoweringInfo().isJumpExpensive() && BOp &&
2292 BOp->hasOneUse() && !I.hasMetadata(LLVMContext::MD_unpredictable)) {
2293 Value *Vec;
2294 const Value *BOp0, *BOp1;
2295 Instruction::BinaryOps Opcode = (Instruction::BinaryOps)0;
2296 if (match(BOp, m_LogicalAnd(m_Value(BOp0), m_Value(BOp1))))
2297 Opcode = Instruction::And;
2298 else if (match(BOp, m_LogicalOr(m_Value(BOp0), m_Value(BOp1))))
2299 Opcode = Instruction::Or;
2300
2301 if (Opcode && !(match(BOp0, m_ExtractElt(m_Value(Vec), m_Value())) &&
2302 match(BOp1, m_ExtractElt(m_Specific(Vec), m_Value())))) {
2303 FindMergedConditions(BOp, Succ0MBB, Succ1MBB, BrMBB, BrMBB, Opcode,
2304 getEdgeProbability(BrMBB, Succ0MBB),
2305 getEdgeProbability(BrMBB, Succ1MBB),
2306 /*InvertCond=*/false);
2307 // If the compares in later blocks need to use values not currently
2308 // exported from this block, export them now. This block should always
2309 // be the first entry.
2310 assert(SL->SwitchCases[0].ThisBB == BrMBB && "Unexpected lowering!");
2311
2312 // Allow some cases to be rejected.
2313 if (ShouldEmitAsBranches(SL->SwitchCases)) {
2314 for (unsigned i = 1, e = SL->SwitchCases.size(); i != e; ++i) {
2315 ExportFromCurrentBlock(SL->SwitchCases[i].CmpLHS);
2316 ExportFromCurrentBlock(SL->SwitchCases[i].CmpRHS);
2317 }
2318
2319 // Emit the branch for this block.
2320 visitSwitchCase(SL->SwitchCases[0], BrMBB);
2321 SL->SwitchCases.erase(SL->SwitchCases.begin());
2322 return;
2323 }
2324
2325 // Okay, we decided not to do this, remove any inserted MBB's and clear
2326 // SwitchCases.
2327 for (unsigned i = 1, e = SL->SwitchCases.size(); i != e; ++i)
2328 FuncInfo.MF->erase(SL->SwitchCases[i].ThisBB);
2329
2330 SL->SwitchCases.clear();
2331 }
2332 }
2333
2334 // Create a CaseBlock record representing this branch.
2335 CaseBlock CB(ISD::SETEQ, CondVal, ConstantInt::getTrue(*DAG.getContext()),
2336 nullptr, Succ0MBB, Succ1MBB, BrMBB, getCurSDLoc());
2337
2338 // Use visitSwitchCase to actually insert the fast branch sequence for this
2339 // cond branch.
2340 visitSwitchCase(CB, BrMBB);
2341}
2342
2343/// visitSwitchCase - Emits the necessary code to represent a single node in
2344/// the binary search tree resulting from lowering a switch instruction.
2345void SelectionDAGBuilder::visitSwitchCase(CaseBlock &CB,
2346 MachineBasicBlock *SwitchBB) {
2347 SDValue Cond;
2348 SDValue CondLHS = getValue(CB.CmpLHS);
2349 SDLoc dl = CB.DL;
2350
2351 if (CB.CC == ISD::SETTRUE) {
2352 // Branch or fall through to TrueBB.
2353 addSuccessorWithProb(SwitchBB, CB.TrueBB, CB.TrueProb);
2354 SwitchBB->normalizeSuccProbs();
2355 if (CB.TrueBB != NextBlock(SwitchBB)) {
2356 DAG.setRoot(DAG.getNode(ISD::BR, dl, MVT::Other, getControlRoot(),
2357 DAG.getBasicBlock(CB.TrueBB)));
2358 }
2359 return;
2360 }
2361
2362 auto &TLI = DAG.getTargetLoweringInfo();
2363 EVT MemVT = TLI.getMemValueType(DAG.getDataLayout(), CB.CmpLHS->getType());
2364
2365 // Build the setcc now.
2366 if (!CB.CmpMHS) {
2367 // Fold "(X == true)" to X and "(X == false)" to !X to
2368 // handle common cases produced by branch lowering.
2369 if (CB.CmpRHS == ConstantInt::getTrue(*DAG.getContext()) &&
2370 CB.CC == ISD::SETEQ)
2371 Cond = CondLHS;
2372 else if (CB.CmpRHS == ConstantInt::getFalse(*DAG.getContext()) &&
2373 CB.CC == ISD::SETEQ) {
2374 SDValue True = DAG.getConstant(1, dl, CondLHS.getValueType());
2375 Cond = DAG.getNode(ISD::XOR, dl, CondLHS.getValueType(), CondLHS, True);
2376 } else {
2377 SDValue CondRHS = getValue(CB.CmpRHS);
2378
2379 // If a pointer's DAG type is larger than its memory type then the DAG
2380 // values are zero-extended. This breaks signed comparisons so truncate
2381 // back to the underlying type before doing the compare.
2382 if (CondLHS.getValueType() != MemVT) {
2383 CondLHS = DAG.getPtrExtOrTrunc(CondLHS, getCurSDLoc(), MemVT);
2384 CondRHS = DAG.getPtrExtOrTrunc(CondRHS, getCurSDLoc(), MemVT);
2385 }
2386 Cond = DAG.getSetCC(dl, MVT::i1, CondLHS, CondRHS, CB.CC);
2387 }
2388 } else {
2389 assert(CB.CC == ISD::SETLE && "Can handle only LE ranges now");
2390
2391 const APInt& Low = cast<ConstantInt>(CB.CmpLHS)->getValue();
2392 const APInt& High = cast<ConstantInt>(CB.CmpRHS)->getValue();
2393
2394 SDValue CmpOp = getValue(CB.CmpMHS);
2395 EVT VT = CmpOp.getValueType();
2396
2397 if (cast<ConstantInt>(CB.CmpLHS)->isMinValue(true)) {
2398 Cond = DAG.getSetCC(dl, MVT::i1, CmpOp, DAG.getConstant(High, dl, VT),
2399 ISD::SETLE);
2400 } else {
2401 SDValue SUB = DAG.getNode(ISD::SUB, dl,
2402 VT, CmpOp, DAG.getConstant(Low, dl, VT));
2403 Cond = DAG.getSetCC(dl, MVT::i1, SUB,
2404 DAG.getConstant(High-Low, dl, VT), ISD::SETULE);
2405 }
2406 }
2407
2408 // Update successor info
2409 addSuccessorWithProb(SwitchBB, CB.TrueBB, CB.TrueProb);
2410 // TrueBB and FalseBB are always different unless the incoming IR is
2411 // degenerate. This only happens when running llc on weird IR.
2412 if (CB.TrueBB != CB.FalseBB)
2413 addSuccessorWithProb(SwitchBB, CB.FalseBB, CB.FalseProb);
2414 SwitchBB->normalizeSuccProbs();
2415
2416 // If the lhs block is the next block, invert the condition so that we can
2417 // fall through to the lhs instead of the rhs block.
2418 if (CB.TrueBB == NextBlock(SwitchBB)) {
2419 std::swap(CB.TrueBB, CB.FalseBB);
2420 SDValue True = DAG.getConstant(1, dl, Cond.getValueType());
2421 Cond = DAG.getNode(ISD::XOR, dl, Cond.getValueType(), Cond, True);
2422 }
2423
2424 SDValue BrCond = DAG.getNode(ISD::BRCOND, dl,
2425 MVT::Other, getControlRoot(), Cond,
2426 DAG.getBasicBlock(CB.TrueBB));
2427
2428 // Insert the false branch. Do this even if it's a fall through branch,
2429 // this makes it easier to do DAG optimizations which require inverting
2430 // the branch condition.
2431 BrCond = DAG.getNode(ISD::BR, dl, MVT::Other, BrCond,
2432 DAG.getBasicBlock(CB.FalseBB));
2433
2434 DAG.setRoot(BrCond);
2435}
2436
2437/// visitJumpTable - Emit JumpTable node in the current MBB
2438void SelectionDAGBuilder::visitJumpTable(SwitchCG::JumpTable &JT) {
2439 // Emit the code for the jump table
2440 assert(JT.Reg != -1U && "Should lower JT Header first!");
2441 EVT PTy = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout());
2442 SDValue Index = DAG.getCopyFromReg(getControlRoot(), getCurSDLoc(),
2443 JT.Reg, PTy);
2444 SDValue Table = DAG.getJumpTable(JT.JTI, PTy);
2445 SDValue BrJumpTable = DAG.getNode(ISD::BR_JT, getCurSDLoc(),
2446 MVT::Other, Index.getValue(1),
2447 Table, Index);
2448 DAG.setRoot(BrJumpTable);
2449}
2450
2451/// visitJumpTableHeader - This function emits necessary code to produce index
2452/// in the JumpTable from switch case.
2453void SelectionDAGBuilder::visitJumpTableHeader(SwitchCG::JumpTable &JT,
2454 JumpTableHeader &JTH,
2455 MachineBasicBlock *SwitchBB) {
2456 SDLoc dl = getCurSDLoc();
2457
2458 // Subtract the lowest switch case value from the value being switched on.
2459 SDValue SwitchOp = getValue(JTH.SValue);
2460 EVT VT = SwitchOp.getValueType();
2461 SDValue Sub = DAG.getNode(ISD::SUB, dl, VT, SwitchOp,
2462 DAG.getConstant(JTH.First, dl, VT));
2463
2464 // The SDNode we just created, which holds the value being switched on minus
2465 // the smallest case value, needs to be copied to a virtual register so it
2466 // can be used as an index into the jump table in a subsequent basic block.
2467 // This value may be smaller or larger than the target's pointer type, and
2468 // therefore require extension or truncating.
2469 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
2470 SwitchOp = DAG.getZExtOrTrunc(Sub, dl, TLI.getPointerTy(DAG.getDataLayout()));
2471
2472 unsigned JumpTableReg =
2473 FuncInfo.CreateReg(TLI.getPointerTy(DAG.getDataLayout()));
2474 SDValue CopyTo = DAG.getCopyToReg(getControlRoot(), dl,
2475 JumpTableReg, SwitchOp);
2476 JT.Reg = JumpTableReg;
2477
2478 if (!JTH.OmitRangeCheck) {
2479 // Emit the range check for the jump table, and branch to the default block
2480 // for the switch statement if the value being switched on exceeds the
2481 // largest case in the switch.
2482 SDValue CMP = DAG.getSetCC(
2483 dl, TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(),
2484 Sub.getValueType()),
2485 Sub, DAG.getConstant(JTH.Last - JTH.First, dl, VT), ISD::SETUGT);
2486
2487 SDValue BrCond = DAG.getNode(ISD::BRCOND, dl,
2488 MVT::Other, CopyTo, CMP,
2489 DAG.getBasicBlock(JT.Default));
2490
2491 // Avoid emitting unnecessary branches to the next block.
2492 if (JT.MBB != NextBlock(SwitchBB))
2493 BrCond = DAG.getNode(ISD::BR, dl, MVT::Other, BrCond,
2494 DAG.getBasicBlock(JT.MBB));
2495
2496 DAG.setRoot(BrCond);
2497 } else {
2498 // Avoid emitting unnecessary branches to the next block.
2499 if (JT.MBB != NextBlock(SwitchBB))
2500 DAG.setRoot(DAG.getNode(ISD::BR, dl, MVT::Other, CopyTo,
2501 DAG.getBasicBlock(JT.MBB)));
2502 else
2503 DAG.setRoot(CopyTo);
2504 }
2505}
2506
2507/// Create a LOAD_STACK_GUARD node, and let it carry the target specific global
2508/// variable if there exists one.
2509static SDValue getLoadStackGuard(SelectionDAG &DAG, const SDLoc &DL,
2510 SDValue &Chain) {
2511 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
2512 EVT PtrTy = TLI.getPointerTy(DAG.getDataLayout());
2513 EVT PtrMemTy = TLI.getPointerMemTy(DAG.getDataLayout());
2514 MachineFunction &MF = DAG.getMachineFunction();
2515 Value *Global = TLI.getSDagStackGuard(*MF.getFunction().getParent());
2516 MachineSDNode *Node =
2517 DAG.getMachineNode(TargetOpcode::LOAD_STACK_GUARD, DL, PtrTy, Chain);
2518 if (Global) {
2519 MachinePointerInfo MPInfo(Global);
2520 auto Flags = MachineMemOperand::MOLoad | MachineMemOperand::MOInvariant |
2521 MachineMemOperand::MODereferenceable;
2522 MachineMemOperand *MemRef = MF.getMachineMemOperand(
2523 MPInfo, Flags, PtrTy.getSizeInBits() / 8, DAG.getEVTAlign(PtrTy));
2524 DAG.setNodeMemRefs(Node, {MemRef});
2525 }
2526 if (PtrTy != PtrMemTy)
2527 return DAG.getPtrExtOrTrunc(SDValue(Node, 0), DL, PtrMemTy);
2528 return SDValue(Node, 0);
2529}
2530
2531/// Codegen a new tail for a stack protector check ParentMBB which has had its
2532/// tail spliced into a stack protector check success bb.
2533///
2534/// For a high level explanation of how this fits into the stack protector
2535/// generation see the comment on the declaration of class
2536/// StackProtectorDescriptor.
2537void SelectionDAGBuilder::visitSPDescriptorParent(StackProtectorDescriptor &SPD,
2538 MachineBasicBlock *ParentBB) {
2539
2540 // First create the loads to the guard/stack slot for the comparison.
2541 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
2542 EVT PtrTy = TLI.getPointerTy(DAG.getDataLayout());
2543 EVT PtrMemTy = TLI.getPointerMemTy(DAG.getDataLayout());
2544
2545 MachineFrameInfo &MFI = ParentBB->getParent()->getFrameInfo();
2546 int FI = MFI.getStackProtectorIndex();
2547
2548 SDValue Guard;
2549 SDLoc dl = getCurSDLoc();
2550 SDValue StackSlotPtr = DAG.getFrameIndex(FI, PtrTy);
2551 const Module &M = *ParentBB->getParent()->getFunction().getParent();
2552 Align Align = DL->getPrefTypeAlign(Type::getInt8PtrTy(M.getContext()));
2553
2554 // Generate code to load the content of the guard slot.
2555 SDValue GuardVal = DAG.getLoad(
2556 PtrMemTy, dl, DAG.getEntryNode(), StackSlotPtr,
2557 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI), Align,
2558 MachineMemOperand::MOVolatile);
2559
2560 if (TLI.useStackGuardXorFP())
2561 GuardVal = TLI.emitStackGuardXorFP(DAG, GuardVal, dl);
2562
2563 // Retrieve guard check function, nullptr if instrumentation is inlined.
2564 if (const Function *GuardCheckFn = TLI.getSSPStackGuardCheck(M)) {
2565 // The target provides a guard check function to validate the guard value.
2566 // Generate a call to that function with the content of the guard slot as
2567 // argument.
2568 FunctionType *FnTy = GuardCheckFn->getFunctionType();
2569 assert(FnTy->getNumParams() == 1 && "Invalid function signature");
2570
2571 TargetLowering::ArgListTy Args;
2572 TargetLowering::ArgListEntry Entry;
2573 Entry.Node = GuardVal;
2574 Entry.Ty = FnTy->getParamType(0);
2575 if (GuardCheckFn->hasAttribute(1, Attribute::AttrKind::InReg))
2576 Entry.IsInReg = true;
2577 Args.push_back(Entry);
2578
2579 TargetLowering::CallLoweringInfo CLI(DAG);
2580 CLI.setDebugLoc(getCurSDLoc())
2581 .setChain(DAG.getEntryNode())
2582 .setCallee(GuardCheckFn->getCallingConv(), FnTy->getReturnType(),
2583 getValue(GuardCheckFn), std::move(Args));
2584
2585 std::pair<SDValue, SDValue> Result = TLI.LowerCallTo(CLI);
2586 DAG.setRoot(Result.second);
2587 return;
2588 }
2589
2590 // If useLoadStackGuardNode returns true, generate LOAD_STACK_GUARD.
2591 // Otherwise, emit a volatile load to retrieve the stack guard value.
2592 SDValue Chain = DAG.getEntryNode();
2593 if (TLI.useLoadStackGuardNode()) {
2594 Guard = getLoadStackGuard(DAG, dl, Chain);
2595 } else {
2596 const Value *IRGuard = TLI.getSDagStackGuard(M);
2597 SDValue GuardPtr = getValue(IRGuard);
2598
2599 Guard = DAG.getLoad(PtrMemTy, dl, Chain, GuardPtr,
2600 MachinePointerInfo(IRGuard, 0), Align,
2601 MachineMemOperand::MOVolatile);
2602 }
2603
2604 // Perform the comparison via a getsetcc.
2605 SDValue Cmp = DAG.getSetCC(dl, TLI.getSetCCResultType(DAG.getDataLayout(),
2606 *DAG.getContext(),
2607 Guard.getValueType()),
2608 Guard, GuardVal, ISD::SETNE);
2609
2610 // If the guard/stackslot do not equal, branch to failure MBB.
2611 SDValue BrCond = DAG.getNode(ISD::BRCOND, dl,
2612 MVT::Other, GuardVal.getOperand(0),
2613 Cmp, DAG.getBasicBlock(SPD.getFailureMBB()));
2614 // Otherwise branch to success MBB.
2615 SDValue Br = DAG.getNode(ISD::BR, dl,
2616 MVT::Other, BrCond,
2617 DAG.getBasicBlock(SPD.getSuccessMBB()));
2618
2619 DAG.setRoot(Br);
2620}
2621
2622/// Codegen the failure basic block for a stack protector check.
2623///
2624/// A failure stack protector machine basic block consists simply of a call to
2625/// __stack_chk_fail().
2626///
2627/// For a high level explanation of how this fits into the stack protector
2628/// generation see the comment on the declaration of class
2629/// StackProtectorDescriptor.
2630void
2631SelectionDAGBuilder::visitSPDescriptorFailure(StackProtectorDescriptor &SPD) {
2632 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
2633 TargetLowering::MakeLibCallOptions CallOptions;
2634 CallOptions.setDiscardResult(true);
2635 SDValue Chain =
2636 TLI.makeLibCall(DAG, RTLIB::STACKPROTECTOR_CHECK_FAIL, MVT::isVoid,
2637 None, CallOptions, getCurSDLoc()).second;
2638 // On PS4, the "return address" must still be within the calling function,
2639 // even if it's at the very end, so emit an explicit TRAP here.
2640 // Passing 'true' for doesNotReturn above won't generate the trap for us.
2641 if (TM.getTargetTriple().isPS4CPU())
2642 Chain = DAG.getNode(ISD::TRAP, getCurSDLoc(), MVT::Other, Chain);
2643 // WebAssembly needs an unreachable instruction after a non-returning call,
2644 // because the function return type can be different from __stack_chk_fail's
2645 // return type (void).
2646 if (TM.getTargetTriple().isWasm())
2647 Chain = DAG.getNode(ISD::TRAP, getCurSDLoc(), MVT::Other, Chain);
2648
2649 DAG.setRoot(Chain);
2650}
2651
2652/// visitBitTestHeader - This function emits necessary code to produce value
2653/// suitable for "bit tests"
2654void SelectionDAGBuilder::visitBitTestHeader(BitTestBlock &B,
2655 MachineBasicBlock *SwitchBB) {
2656 SDLoc dl = getCurSDLoc();
2657
2658 // Subtract the minimum value.
2659 SDValue SwitchOp = getValue(B.SValue);
2660 EVT VT = SwitchOp.getValueType();
2661 SDValue RangeSub =
2662 DAG.getNode(ISD::SUB, dl, VT, SwitchOp, DAG.getConstant(B.First, dl, VT));
2663
2664 // Determine the type of the test operands.
2665 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
2666 bool UsePtrType = false;
2667 if (!TLI.isTypeLegal(VT)) {
2668 UsePtrType = true;
2669 } else {
2670 for (unsigned i = 0, e = B.Cases.size(); i != e; ++i)
2671 if (!isUIntN(VT.getSizeInBits(), B.Cases[i].Mask)) {
2672 // Switch table case range are encoded into series of masks.
2673 // Just use pointer type, it's guaranteed to fit.
2674 UsePtrType = true;
2675 break;
2676 }
2677 }
2678 SDValue Sub = RangeSub;
2679 if (UsePtrType) {
2680 VT = TLI.getPointerTy(DAG.getDataLayout());
2681 Sub = DAG.getZExtOrTrunc(Sub, dl, VT);
2682 }
2683
2684 B.RegVT = VT.getSimpleVT();
2685 B.Reg = FuncInfo.CreateReg(B.RegVT);
2686 SDValue CopyTo = DAG.getCopyToReg(getControlRoot(), dl, B.Reg, Sub);
2687
2688 MachineBasicBlock* MBB = B.Cases[0].ThisBB;
2689
2690 if (!B.OmitRangeCheck)
2691 addSuccessorWithProb(SwitchBB, B.Default, B.DefaultProb);
2692 addSuccessorWithProb(SwitchBB, MBB, B.Prob);
2693 SwitchBB->normalizeSuccProbs();
2694
2695 SDValue Root = CopyTo;
2696 if (!B.OmitRangeCheck) {
2697 // Conditional branch to the default block.
2698 SDValue RangeCmp = DAG.getSetCC(dl,
2699 TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(),
2700 RangeSub.getValueType()),
2701 RangeSub, DAG.getConstant(B.Range, dl, RangeSub.getValueType()),
2702 ISD::SETUGT);
2703
2704 Root = DAG.getNode(ISD::BRCOND, dl, MVT::Other, Root, RangeCmp,
2705 DAG.getBasicBlock(B.Default));
2706 }
2707
2708 // Avoid emitting unnecessary branches to the next block.
2709 if (MBB != NextBlock(SwitchBB))
2710 Root = DAG.getNode(ISD::BR, dl, MVT::Other, Root, DAG.getBasicBlock(MBB));
2711
2712 DAG.setRoot(Root);
2713}
2714
2715/// visitBitTestCase - this function produces one "bit test"
2716void SelectionDAGBuilder::visitBitTestCase(BitTestBlock &BB,
2717 MachineBasicBlock* NextMBB,
2718 BranchProbability BranchProbToNext,
2719 unsigned Reg,
2720 BitTestCase &B,
2721 MachineBasicBlock *SwitchBB) {
2722 SDLoc dl = getCurSDLoc();
2723 MVT VT = BB.RegVT;
2724 SDValue ShiftOp = DAG.getCopyFromReg(getControlRoot(), dl, Reg, VT);
2725 SDValue Cmp;
2726 unsigned PopCount = countPopulation(B.Mask);
2727 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
2728 if (PopCount == 1) {
2729 // Testing for a single bit; just compare the shift count with what it
2730 // would need to be to shift a 1 bit in that position.
2731 Cmp = DAG.getSetCC(
2732 dl, TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT),
2733 ShiftOp, DAG.getConstant(countTrailingZeros(B.Mask), dl, VT),
2734 ISD::SETEQ);
2735 } else if (PopCount == BB.Range) {
2736 // There is only one zero bit in the range, test for it directly.
2737 Cmp = DAG.getSetCC(
2738 dl, TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT),
2739 ShiftOp, DAG.getConstant(countTrailingOnes(B.Mask), dl, VT),
2740 ISD::SETNE);
2741 } else {
2742 // Make desired shift
2743 SDValue SwitchVal = DAG.getNode(ISD::SHL, dl, VT,
2744 DAG.getConstant(1, dl, VT), ShiftOp);
2745
2746 // Emit bit tests and jumps
2747 SDValue AndOp = DAG.getNode(ISD::AND, dl,
2748 VT, SwitchVal, DAG.getConstant(B.Mask, dl, VT));
2749 Cmp = DAG.getSetCC(
2750 dl, TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT),
2751 AndOp, DAG.getConstant(0, dl, VT), ISD::SETNE);
2752 }
2753
2754 // The branch probability from SwitchBB to B.TargetBB is B.ExtraProb.
2755 addSuccessorWithProb(SwitchBB, B.TargetBB, B.ExtraProb);
2756 // The branch probability from SwitchBB to NextMBB is BranchProbToNext.
2757 addSuccessorWithProb(SwitchBB, NextMBB, BranchProbToNext);
2758 // It is not guaranteed that the sum of B.ExtraProb and BranchProbToNext is
2759 // one as they are relative probabilities (and thus work more like weights),
2760 // and hence we need to normalize them to let the sum of them become one.
2761 SwitchBB->normalizeSuccProbs();
2762
2763 SDValue BrAnd = DAG.getNode(ISD::BRCOND, dl,
2764 MVT::Other, getControlRoot(),
2765 Cmp, DAG.getBasicBlock(B.TargetBB));
2766
2767 // Avoid emitting unnecessary branches to the next block.
2768 if (NextMBB != NextBlock(SwitchBB))
2769 BrAnd = DAG.getNode(ISD::BR, dl, MVT::Other, BrAnd,
2770 DAG.getBasicBlock(NextMBB));
2771
2772 DAG.setRoot(BrAnd);
2773}
2774
2775void SelectionDAGBuilder::visitInvoke(const InvokeInst &I) {
2776 MachineBasicBlock *InvokeMBB = FuncInfo.MBB;
2777
2778 // Retrieve successors. Look through artificial IR level blocks like
2779 // catchswitch for successors.
2780 MachineBasicBlock *Return = FuncInfo.MBBMap[I.getSuccessor(0)];
2781 const BasicBlock *EHPadBB = I.getSuccessor(1);
2782
2783 // Deopt bundles are lowered in LowerCallSiteWithDeoptBundle, and we don't
2784 // have to do anything here to lower funclet bundles.
2785 assert(!I.hasOperandBundlesOtherThan({LLVMContext::OB_deopt,
2786 LLVMContext::OB_gc_transition,
2787 LLVMContext::OB_gc_live,
2788 LLVMContext::OB_funclet,
2789 LLVMContext::OB_cfguardtarget}) &&
2790 "Cannot lower invokes with arbitrary operand bundles yet!");
2791
2792 const Value *Callee(I.getCalledOperand());
2793 const Function *Fn = dyn_cast<Function>(Callee);
2794 if (isa<InlineAsm>(Callee))
2795 visitInlineAsm(I);
2796 else if (Fn && Fn->isIntrinsic()) {
2797 switch (Fn->getIntrinsicID()) {
2798 default:
2799 llvm_unreachable("Cannot invoke this intrinsic");
2800 case Intrinsic::donothing:
2801 // Ignore invokes to @llvm.donothing: jump directly to the next BB.
2802 break;
2803 case Intrinsic::experimental_patchpoint_void:
2804 case Intrinsic::experimental_patchpoint_i64:
2805 visitPatchpoint(I, EHPadBB);
2806 break;
2807 case Intrinsic::experimental_gc_statepoint:
2808 LowerStatepoint(cast<GCStatepointInst>(I), EHPadBB);
2809 break;
2810 case Intrinsic::wasm_rethrow: {
2811 // This is usually done in visitTargetIntrinsic, but this intrinsic is
2812 // special because it can be invoked, so we manually lower it to a DAG
2813 // node here.
2814 SmallVector<SDValue, 8> Ops;
2815 Ops.push_back(getRoot()); // inchain
2816 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
2817 Ops.push_back(
2818 DAG.getTargetConstant(Intrinsic::wasm_rethrow, getCurSDLoc(),
2819 TLI.getPointerTy(DAG.getDataLayout())));
2820 SDVTList VTs = DAG.getVTList(ArrayRef<EVT>({MVT::Other})); // outchain
2821 DAG.setRoot(DAG.getNode(ISD::INTRINSIC_VOID, getCurSDLoc(), VTs, Ops));
2822 break;
2823 }
2824 }
2825 } else if (I.countOperandBundlesOfType(LLVMContext::OB_deopt)) {
2826 // Currently we do not lower any intrinsic calls with deopt operand bundles.
2827 // Eventually we will support lowering the @llvm.experimental.deoptimize
2828 // intrinsic, and right now there are no plans to support other intrinsics
2829 // with deopt state.
2830 LowerCallSiteWithDeoptBundle(&I, getValue(Callee), EHPadBB);
2831 } else {
2832 LowerCallTo(I, getValue(Callee), false, EHPadBB);
2833 }
2834
2835 // If the value of the invoke is used outside of its defining block, make it
2836 // available as a virtual register.
2837 // We already took care of the exported value for the statepoint instruction
2838 // during call to the LowerStatepoint.
2839 if (!isa<GCStatepointInst>(I)) {
2840 CopyToExportRegsIfNeeded(&I);
2841 }
2842
2843 SmallVector<std::pair<MachineBasicBlock *, BranchProbability>, 1> UnwindDests;
2844 BranchProbabilityInfo *BPI = FuncInfo.BPI;
2845 BranchProbability EHPadBBProb =
2846 BPI ? BPI->getEdgeProbability(InvokeMBB->getBasicBlock(), EHPadBB)
2847 : BranchProbability::getZero();
2848 findUnwindDestinations(FuncInfo, EHPadBB, EHPadBBProb, UnwindDests);
2849
2850 // Update successor info.
2851 addSuccessorWithProb(InvokeMBB, Return);
2852 for (auto &UnwindDest : UnwindDests) {
2853 UnwindDest.first->setIsEHPad();
2854 addSuccessorWithProb(InvokeMBB, UnwindDest.first, UnwindDest.second);
2855 }
2856 InvokeMBB->normalizeSuccProbs();
2857
2858 // Drop into normal successor.
2859 DAG.setRoot(DAG.getNode(ISD::BR, getCurSDLoc(), MVT::Other, getControlRoot(),
2860 DAG.getBasicBlock(Return)));
2861}
2862
2863void SelectionDAGBuilder::visitCallBr(const CallBrInst &I) {
2864 MachineBasicBlock *CallBrMBB = FuncInfo.MBB;
2865
2866 // Deopt bundles are lowered in LowerCallSiteWithDeoptBundle, and we don't
2867 // have to do anything here to lower funclet bundles.
2868 assert(!I.hasOperandBundlesOtherThan(
2869 {LLVMContext::OB_deopt, LLVMContext::OB_funclet}) &&
2870 "Cannot lower callbrs with arbitrary operand bundles yet!");
2871
2872 assert(I.isInlineAsm() && "Only know how to handle inlineasm callbr");
2873 visitInlineAsm(I);
2874 CopyToExportRegsIfNeeded(&I);
2875
2876 // Retrieve successors.
2877 MachineBasicBlock *Return = FuncInfo.MBBMap[I.getDefaultDest()];
2878
2879 // Update successor info.
2880 addSuccessorWithProb(CallBrMBB, Return, BranchProbability::getOne());
2881 for (unsigned i = 0, e = I.getNumIndirectDests(); i < e; ++i) {
2882 MachineBasicBlock *Target = FuncInfo.MBBMap[I.getIndirectDest(i)];
2883 addSuccessorWithProb(CallBrMBB, Target, BranchProbability::getZero());
2884 Target->setIsInlineAsmBrIndirectTarget();
2885 }
2886 CallBrMBB->normalizeSuccProbs();
2887
2888 // Drop into default successor.
2889 DAG.setRoot(DAG.getNode(ISD::BR, getCurSDLoc(),
2890 MVT::Other, getControlRoot(),
2891 DAG.getBasicBlock(Return)));
2892}
2893
2894void SelectionDAGBuilder::visitResume(const ResumeInst &RI) {
2895 llvm_unreachable("SelectionDAGBuilder shouldn't visit resume instructions!");
2896}
2897
2898void SelectionDAGBuilder::visitLandingPad(const LandingPadInst &LP) {
2899 assert(FuncInfo.MBB->isEHPad() &&
2900 "Call to landingpad not in landing pad!");
2901
2902 // If there aren't registers to copy the values into (e.g., during SjLj
2903 // exceptions), then don't bother to create these DAG nodes.
2904 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
2905 const Constant *PersonalityFn = FuncInfo.Fn->getPersonalityFn();
2906 if (TLI.getExceptionPointerRegister(PersonalityFn) == 0 &&
2907 TLI.getExceptionSelectorRegister(PersonalityFn) == 0)
2908 return;
2909
2910 // If landingpad's return type is token type, we don't create DAG nodes
2911 // for its exception pointer and selector value. The extraction of exception
2912 // pointer or selector value from token type landingpads is not currently
2913 // supported.
2914 if (LP.getType()->isTokenTy())
2915 return;
2916
2917 SmallVector<EVT, 2> ValueVTs;
2918 SDLoc dl = getCurSDLoc();
2919 ComputeValueVTs(TLI, DAG.getDataLayout(), LP.getType(), ValueVTs);
2920 assert(ValueVTs.size() == 2 && "Only two-valued landingpads are supported");
2921
2922 // Get the two live-in registers as SDValues. The physregs have already been
2923 // copied into virtual registers.
2924 SDValue Ops[2];
2925 if (FuncInfo.ExceptionPointerVirtReg) {
2926 Ops[0] = DAG.getZExtOrTrunc(
2927 DAG.getCopyFromReg(DAG.getEntryNode(), dl,
2928 FuncInfo.ExceptionPointerVirtReg,
2929 TLI.getPointerTy(DAG.getDataLayout())),
2930 dl, ValueVTs[0]);
2931 } else {
2932 Ops[0] = DAG.getConstant(0, dl, TLI.getPointerTy(DAG.getDataLayout()));
2933 }
2934 Ops[1] = DAG.getZExtOrTrunc(
2935 DAG.getCopyFromReg(DAG.getEntryNode(), dl,
2936 FuncInfo.ExceptionSelectorVirtReg,
2937 TLI.getPointerTy(DAG.getDataLayout())),
2938 dl, ValueVTs[1]);
2939
2940 // Merge into one.
2941 SDValue Res = DAG.getNode(ISD::MERGE_VALUES, dl,
2942 DAG.getVTList(ValueVTs), Ops);
2943 setValue(&LP, Res);
2944}
2945
2946void SelectionDAGBuilder::UpdateSplitBlock(MachineBasicBlock *First,
2947 MachineBasicBlock *Last) {
2948 // Update JTCases.
2949 for (unsigned i = 0, e = SL->JTCases.size(); i != e; ++i)
2950 if (SL->JTCases[i].first.HeaderBB == First)
2951 SL->JTCases[i].first.HeaderBB = Last;
2952
2953 // Update BitTestCases.
2954 for (unsigned i = 0, e = SL->BitTestCases.size(); i != e; ++i)
2955 if (SL->BitTestCases[i].Parent == First)
2956 SL->BitTestCases[i].Parent = Last;
2957}
2958
2959void SelectionDAGBuilder::visitIndirectBr(const IndirectBrInst &I) {
2960 MachineBasicBlock *IndirectBrMBB = FuncInfo.MBB;
2961
2962 // Update machine-CFG edges with unique successors.
2963 SmallSet<BasicBlock*, 32> Done;
2964 for (unsigned i = 0, e = I.getNumSuccessors(); i != e; ++i) {
2965 BasicBlock *BB = I.getSuccessor(i);
2966 bool Inserted = Done.insert(BB).second;
2967 if (!Inserted)
2968 continue;
2969
2970 MachineBasicBlock *Succ = FuncInfo.MBBMap[BB];
2971 addSuccessorWithProb(IndirectBrMBB, Succ);
2972 }
2973 IndirectBrMBB->normalizeSuccProbs();
2974
2975 DAG.setRoot(DAG.getNode(ISD::BRIND, getCurSDLoc(),
2976 MVT::Other, getControlRoot(),
2977 getValue(I.getAddress())));
2978}
2979
2980void SelectionDAGBuilder::visitUnreachable(const UnreachableInst &I) {
2981 if (!DAG.getTarget().Options.TrapUnreachable)
2982 return;
2983
2984 // We may be able to ignore unreachable behind a noreturn call.
2985 if (DAG.getTarget().Options.NoTrapAfterNoreturn) {
2986 const BasicBlock &BB = *I.getParent();
2987 if (&I != &BB.front()) {
2988 BasicBlock::const_iterator PredI =
2989 std::prev(BasicBlock::const_iterator(&I));
2990 if (const CallInst *Call = dyn_cast<CallInst>(&*PredI)) {
2991 if (Call->doesNotReturn())
2992 return;
2993 }
2994 }
2995 }
2996
2997 DAG.setRoot(DAG.getNode(ISD::TRAP, getCurSDLoc(), MVT::Other, DAG.getRoot()));
2998}
2999
3000void SelectionDAGBuilder::visitUnary(const User &I, unsigned Opcode) {
3001 SDNodeFlags Flags;
3002
3003 SDValue Op = getValue(I.getOperand(0));
3004 SDValue UnNodeValue = DAG.getNode(Opcode, getCurSDLoc(), Op.getValueType(),
3005 Op, Flags);
3006 setValue(&I, UnNodeValue);
3007}
3008
3009void SelectionDAGBuilder::visitBinary(const User &I, unsigned Opcode) {
3010 SDNodeFlags Flags;
3011 if (auto *OFBinOp = dyn_cast<OverflowingBinaryOperator>(&I)) {
3012 Flags.setNoSignedWrap(OFBinOp->hasNoSignedWrap());
3013 Flags.setNoUnsignedWrap(OFBinOp->hasNoUnsignedWrap());
3014 }
3015 if (auto *ExactOp = dyn_cast<PossiblyExactOperator>(&I))
3016 Flags.setExact(ExactOp->isExact());
3017 if (auto *FPOp = dyn_cast<FPMathOperator>(&I))
3018 Flags.copyFMF(*FPOp);
3019
3020 SDValue Op1 = getValue(I.getOperand(0));
3021 SDValue Op2 = getValue(I.getOperand(1));
3022 SDValue BinNodeValue = DAG.getNode(Opcode, getCurSDLoc(), Op1.getValueType(),
3023 Op1, Op2, Flags);
3024 setValue(&I, BinNodeValue);
3025}
3026
3027void SelectionDAGBuilder::visitShift(const User &I, unsigned Opcode) {
3028 SDValue Op1 = getValue(I.getOperand(0));
3029 SDValue Op2 = getValue(I.getOperand(1));
3030
3031 EVT ShiftTy = DAG.getTargetLoweringInfo().getShiftAmountTy(
3032 Op1.getValueType(), DAG.getDataLayout());
3033
3034 // Coerce the shift amount to the right type if we can.
3035 if (!I.getType()->isVectorTy() && Op2.getValueType() != ShiftTy) {
3036 unsigned ShiftSize = ShiftTy.getSizeInBits();
3037 unsigned Op2Size = Op2.getValueSizeInBits();
3038 SDLoc DL = getCurSDLoc();
3039
3040 // If the operand is smaller than the shift count type, promote it.
3041 if (ShiftSize > Op2Size)
3042 Op2 = DAG.getNode(ISD::ZERO_EXTEND, DL, ShiftTy, Op2);
3043
3044 // If the operand is larger than the shift count type but the shift
3045 // count type has enough bits to represent any shift value, truncate
3046 // it now. This is a common case and it exposes the truncate to
3047 // optimization early.
3048 else if (ShiftSize >= Log2_32_Ceil(Op2.getValueSizeInBits()))
3049 Op2 = DAG.getNode(ISD::TRUNCATE, DL, ShiftTy, Op2);
3050 // Otherwise we'll need to temporarily settle for some other convenient
3051 // type. Type legalization will make adjustments once the shiftee is split.
3052 else
3053 Op2 = DAG.getZExtOrTrunc(Op2, DL, MVT::i32);
3054 }
3055
3056 bool nuw = false;
3057 bool nsw = false;
3058 bool exact = false;
3059
3060 if (Opcode == ISD::SRL || Opcode == ISD::SRA || Opcode == ISD::SHL) {
3061
3062 if (const OverflowingBinaryOperator *OFBinOp =
3063 dyn_cast<const OverflowingBinaryOperator>(&I)) {
3064 nuw = OFBinOp->hasNoUnsignedWrap();
3065 nsw = OFBinOp->hasNoSignedWrap();
3066 }
3067 if (const PossiblyExactOperator *ExactOp =
3068 dyn_cast<const PossiblyExactOperator>(&I))
3069 exact = ExactOp->isExact();
3070 }
3071 SDNodeFlags Flags;
3072 Flags.setExact(exact);
3073 Flags.setNoSignedWrap(nsw);
3074 Flags.setNoUnsignedWrap(nuw);
3075 SDValue Res = DAG.getNode(Opcode, getCurSDLoc(), Op1.getValueType(), Op1, Op2,
3076 Flags);
3077 setValue(&I, Res);
3078}
3079
3080void SelectionDAGBuilder::visitSDiv(const User &I) {
3081 SDValue Op1 = getValue(I.getOperand(0));
3082 SDValue Op2 = getValue(I.getOperand(1));
3083
3084 SDNodeFlags Flags;
3085 Flags.setExact(isa<PossiblyExactOperator>(&I) &&
3086 cast<PossiblyExactOperator>(&I)->isExact());
3087 setValue(&I, DAG.getNode(ISD::SDIV, getCurSDLoc(), Op1.getValueType(), Op1,
3088 Op2, Flags));
3089}
3090
3091void SelectionDAGBuilder::visitICmp(const User &I) {
3092 ICmpInst::Predicate predicate = ICmpInst::BAD_ICMP_PREDICATE;
3093 if (const ICmpInst *IC = dyn_cast<ICmpInst>(&I))
3094 predicate = IC->getPredicate();
3095 else if (const ConstantExpr *IC = dyn_cast<ConstantExpr>(&I))
3096 predicate = ICmpInst::Predicate(IC->getPredicate());
3097 SDValue Op1 = getValue(I.getOperand(0));
3098 SDValue Op2 = getValue(I.getOperand(1));
3099 ISD::CondCode Opcode = getICmpCondCode(predicate);
3100
3101 auto &TLI = DAG.getTargetLoweringInfo();
3102 EVT MemVT =
3103 TLI.getMemValueType(DAG.getDataLayout(), I.getOperand(0)->getType());
3104
3105 // If a pointer's DAG type is larger than its memory type then the DAG values
3106 // are zero-extended. This breaks signed comparisons so truncate back to the
3107 // underlying type before doing the compare.
3108 if (Op1.getValueType() != MemVT) {
3109 Op1 = DAG.getPtrExtOrTrunc(Op1, getCurSDLoc(), MemVT);
3110 Op2 = DAG.getPtrExtOrTrunc(Op2, getCurSDLoc(), MemVT);
3111 }
3112
3113 EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
3114 I.getType());
3115 setValue(&I, DAG.getSetCC(getCurSDLoc(), DestVT, Op1, Op2, Opcode));
3116}
3117
3118void SelectionDAGBuilder::visitFCmp(const User &I) {
3119 FCmpInst::Predicate predicate = FCmpInst::BAD_FCMP_PREDICATE;
3120 if (const FCmpInst *FC = dyn_cast<FCmpInst>(&I))
3121 predicate = FC->getPredicate();
3122 else if (const ConstantExpr *FC = dyn_cast<ConstantExpr>(&I))
3123 predicate = FCmpInst::Predicate(FC->getPredicate());
3124 SDValue Op1 = getValue(I.getOperand(0));
3125 SDValue Op2 = getValue(I.getOperand(1));
3126
3127 ISD::CondCode Condition = getFCmpCondCode(predicate);
3128 auto *FPMO = cast<FPMathOperator>(&I);
3129 if (FPMO->hasNoNaNs() || TM.Options.NoNaNsFPMath)
3130 Condition = getFCmpCodeWithoutNaN(Condition);
3131
3132 SDNodeFlags Flags;
3133 Flags.copyFMF(*FPMO);
3134 SelectionDAG::FlagInserter FlagsInserter(DAG, Flags);
3135
3136 EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
3137 I.getType());
3138 setValue(&I, DAG.getSetCC(getCurSDLoc(), DestVT, Op1, Op2, Condition));
3139}
3140
3141// Check if the condition of the select has one use or two users that are both
3142// selects with the same condition.
3143static bool hasOnlySelectUsers(const Value *Cond) {
3144 return llvm::all_of(Cond->users(), [](const Value *V) {
3145 return isa<SelectInst>(V);
3146 });
3147}
3148
3149void SelectionDAGBuilder::visitSelect(const User &I) {
3150 SmallVector<EVT, 4> ValueVTs;
3151 ComputeValueVTs(DAG.getTargetLoweringInfo(), DAG.getDataLayout(), I.getType(),
3152 ValueVTs);
3153 unsigned NumValues = ValueVTs.size();
3154 if (NumValues == 0) return;
3155
3156 SmallVector<SDValue, 4> Values(NumValues);
3157 SDValue Cond = getValue(I.getOperand(0));
3158 SDValue LHSVal = getValue(I.getOperand(1));
3159 SDValue RHSVal = getValue(I.getOperand(2));
3160 SmallVector<SDValue, 1> BaseOps(1, Cond);
3161 ISD::NodeType OpCode =
3162 Cond.getValueType().isVector() ? ISD::VSELECT : ISD::SELECT;
3163
3164 bool IsUnaryAbs = false;
3165 bool Negate = false;
3166
3167 SDNodeFlags Flags;
3168 if (auto *FPOp = dyn_cast<FPMathOperator>(&I))
3169 Flags.copyFMF(*FPOp);
3170
3171 // Min/max matching is only viable if all output VTs are the same.
3172 if (is_splat(ValueVTs)) {
3173 EVT VT = ValueVTs[0];
3174 LLVMContext &Ctx = *DAG.getContext();
3175 auto &TLI = DAG.getTargetLoweringInfo();
3176
3177 // We care about the legality of the operation after it has been type
3178 // legalized.
3179 while (TLI.getTypeAction(Ctx, VT) != TargetLoweringBase::TypeLegal)
3180 VT = TLI.getTypeToTransformTo(Ctx, VT);
3181
3182 // If the vselect is legal, assume we want to leave this as a vector setcc +
3183 // vselect. Otherwise, if this is going to be scalarized, we want to see if
3184 // min/max is legal on the scalar type.
3185 bool UseScalarMinMax = VT.isVector() &&
3186 !TLI.isOperationLegalOrCustom(ISD::VSELECT, VT);
3187
3188 Value *LHS, *RHS;
3189 auto SPR = matchSelectPattern(const_cast<User*>(&I), LHS, RHS);
3190 ISD::NodeType Opc = ISD::DELETED_NODE;
3191 switch (SPR.Flavor) {
3192 case SPF_UMAX: Opc = ISD::UMAX; break;
3193 case SPF_UMIN: Opc = ISD::UMIN; break;
3194 case SPF_SMAX: Opc = ISD::SMAX; break;
3195 case SPF_SMIN: Opc = ISD::SMIN; break;
3196 case SPF_FMINNUM:
3197 switch (SPR.NaNBehavior) {
3198 case SPNB_NA: llvm_unreachable("No NaN behavior for FP op?");
3199 case SPNB_RETURNS_NAN: Opc = ISD::FMINIMUM; break;
3200 case SPNB_RETURNS_OTHER: Opc = ISD::FMINNUM; break;
3201 case SPNB_RETURNS_ANY: {
3202 if (TLI.isOperationLegalOrCustom(ISD::FMINNUM, VT))
3203 Opc = ISD::FMINNUM;
3204 else if (TLI.isOperationLegalOrCustom(ISD::FMINIMUM, VT))
3205 Opc = ISD::FMINIMUM;
3206 else if (UseScalarMinMax)
3207 Opc = TLI.isOperationLegalOrCustom(ISD::FMINNUM, VT.getScalarType()) ?
3208 ISD::FMINNUM : ISD::FMINIMUM;
3209 break;
3210 }
3211 }
3212 break;
3213 case SPF_FMAXNUM:
3214 switch (SPR.NaNBehavior) {
3215 case SPNB_NA: llvm_unreachable("No NaN behavior for FP op?");
3216 case SPNB_RETURNS_NAN: Opc = ISD::FMAXIMUM; break;
3217 case SPNB_RETURNS_OTHER: Opc = ISD::FMAXNUM; break;
3218 case SPNB_RETURNS_ANY:
3219
3220 if (TLI.isOperationLegalOrCustom(ISD::FMAXNUM, VT))
3221 Opc = ISD::FMAXNUM;
3222 else if (TLI.isOperationLegalOrCustom(ISD::FMAXIMUM, VT))
3223 Opc = ISD::FMAXIMUM;
3224 else if (UseScalarMinMax)
3225 Opc = TLI.isOperationLegalOrCustom(ISD::FMAXNUM, VT.getScalarType()) ?
3226 ISD::FMAXNUM : ISD::FMAXIMUM;
3227 break;
3228 }
3229 break;
3230 case SPF_NABS:
3231 Negate = true;
3232 LLVM_FALLTHROUGH;
3233 case SPF_ABS:
3234 IsUnaryAbs = true;
3235 Opc = ISD::ABS;
3236 break;
3237 default: break;
3238 }
3239
3240 if (!IsUnaryAbs && Opc != ISD::DELETED_NODE &&
3241 (TLI.isOperationLegalOrCustom(Opc, VT) ||
3242 (UseScalarMinMax &&
3243 TLI.isOperationLegalOrCustom(Opc, VT.getScalarType()))) &&
3244 // If the underlying comparison instruction is used by any other
3245 // instruction, the consumed instructions won't be destroyed, so it is
3246 // not profitable to convert to a min/max.
3247 hasOnlySelectUsers(cast<SelectInst>(I).getCondition())) {
3248 OpCode = Opc;
3249 LHSVal = getValue(LHS);
3250 RHSVal = getValue(RHS);
3251 BaseOps.clear();
3252 }
3253
3254 if (IsUnaryAbs) {
3255 OpCode = Opc;
3256 LHSVal = getValue(LHS);
3257 BaseOps.clear();
3258 }
3259 }
3260
3261 if (IsUnaryAbs) {
3262 for (unsigned i = 0; i != NumValues; ++i) {
3263 SDLoc dl = getCurSDLoc();
3264 EVT VT = LHSVal.getNode()->getValueType(LHSVal.getResNo() + i);
3265 Values[i] =
3266 DAG.getNode(OpCode, dl, VT, LHSVal.getValue(LHSVal.getResNo() + i));
3267 if (Negate)
3268 Values[i] = DAG.getNode(ISD::SUB, dl, VT, DAG.getConstant(0, dl, VT),
3269 Values[i]);
3270 }
3271 } else {
3272 for (unsigned i = 0; i != NumValues; ++i) {
3273 SmallVector<SDValue, 3> Ops(BaseOps.begin(), BaseOps.end());
3274 Ops.push_back(SDValue(LHSVal.getNode(), LHSVal.getResNo() + i));
3275 Ops.push_back(SDValue(RHSVal.getNode(), RHSVal.getResNo() + i));
3276 Values[i] = DAG.getNode(
3277 OpCode, getCurSDLoc(),
3278 LHSVal.getNode()->getValueType(LHSVal.getResNo() + i), Ops, Flags);
3279 }
3280 }
3281
3282 setValue(&I, DAG.getNode(ISD::MERGE_VALUES, getCurSDLoc(),
3283 DAG.getVTList(ValueVTs), Values));
3284}
3285
3286void SelectionDAGBuilder::visitTrunc(const User &I) {
3287 // TruncInst cannot be a no-op cast because sizeof(src) > sizeof(dest).
3288 SDValue N = getValue(I.getOperand(0));
3289 EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
3290 I.getType());
3291 setValue(&I, DAG.getNode(ISD::TRUNCATE, getCurSDLoc(), DestVT, N));
3292}
3293
3294void SelectionDAGBuilder::visitZExt(const User &I) {
3295 // ZExt cannot be a no-op cast because sizeof(src) < sizeof(dest).
3296 // ZExt also can't be a cast to bool for same reason. So, nothing much to do
3297 SDValue N = getValue(I.getOperand(0));
3298 EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
3299 I.getType());
3300 setValue(&I, DAG.getNode(ISD::ZERO_EXTEND, getCurSDLoc(), DestVT, N));
3301}
3302
3303void SelectionDAGBuilder::visitSExt(const User &I) {
3304 // SExt cannot be a no-op cast because sizeof(src) < sizeof(dest).
3305 // SExt also can't be a cast to bool for same reason. So, nothing much to do
3306 SDValue N = getValue(I.getOperand(0));
3307 EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
3308 I.getType());
3309 setValue(&I, DAG.getNode(ISD::SIGN_EXTEND, getCurSDLoc(), DestVT, N));
3310}
3311
3312void SelectionDAGBuilder::visitFPTrunc(const User &I) {
3313 // FPTrunc is never a no-op cast, no need to check
3314 SDValue N = getValue(I.getOperand(0));
3315 SDLoc dl = getCurSDLoc();
3316 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3317 EVT DestVT = TLI.getValueType(DAG.getDataLayout(), I.getType());
3318 setValue(&I, DAG.getNode(ISD::FP_ROUND, dl, DestVT, N,
3319 DAG.getTargetConstant(
3320 0, dl, TLI.getPointerTy(DAG.getDataLayout()))));
3321}
3322
3323void SelectionDAGBuilder::visitFPExt(const User &I) {
3324 // FPExt is never a no-op cast, no need to check
3325 SDValue N = getValue(I.getOperand(0));
3326 EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
3327 I.getType());
3328 setValue(&I, DAG.getNode(ISD::FP_EXTEND, getCurSDLoc(), DestVT, N));
3329}
3330
3331void SelectionDAGBuilder::visitFPToUI(const User &I) {
3332 // FPToUI is never a no-op cast, no need to check
3333 SDValue N = getValue(I.getOperand(0));
3334 EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
3335 I.getType());
3336 setValue(&I, DAG.getNode(ISD::FP_TO_UINT, getCurSDLoc(), DestVT, N));
3337}
3338
3339void SelectionDAGBuilder::visitFPToSI(const User &I) {
3340 // FPToSI is never a no-op cast, no need to check
3341 SDValue N = getValue(I.getOperand(0));
3342 EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
3343 I.getType());
3344 setValue(&I, DAG.getNode(ISD::FP_TO_SINT, getCurSDLoc(), DestVT, N));
3345}
3346
3347void SelectionDAGBuilder::visitUIToFP(const User &I) {
3348 // UIToFP is never a no-op cast, no need to check
3349 SDValue N = getValue(I.getOperand(0));
3350 EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
3351 I.getType());
3352 setValue(&I, DAG.getNode(ISD::UINT_TO_FP, getCurSDLoc(), DestVT, N));
3353}
3354
3355void SelectionDAGBuilder::visitSIToFP(const User &I) {
3356 // SIToFP is never a no-op cast, no need to check
3357 SDValue N = getValue(I.getOperand(0));
3358 EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
3359 I.getType());
3360 setValue(&I, DAG.getNode(ISD::SINT_TO_FP, getCurSDLoc(), DestVT, N));
3361}
3362
3363void SelectionDAGBuilder::visitPtrToInt(const User &I) {
3364 // What to do depends on the size of the integer and the size of the pointer.
3365 // We can either truncate, zero extend, or no-op, accordingly.
3366 SDValue N = getValue(I.getOperand(0));
3367 auto &TLI = DAG.getTargetLoweringInfo();
3368 EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
3369 I.getType());
3370 EVT PtrMemVT =
3371 TLI.getMemValueType(DAG.getDataLayout(), I.getOperand(0)->getType());
3372 N = DAG.getPtrExtOrTrunc(N, getCurSDLoc(), PtrMemVT);
3373 N = DAG.getZExtOrTrunc(N, getCurSDLoc(), DestVT);
3374 setValue(&I, N);
3375}
3376
3377void SelectionDAGBuilder::visitIntToPtr(const User &I) {
3378 // What to do depends on the size of the integer and the size of the pointer.
3379 // We can either truncate, zero extend, or no-op, accordingly.
3380 SDValue N = getValue(I.getOperand(0));
3381 auto &TLI = DAG.getTargetLoweringInfo();
3382 EVT DestVT = TLI.getValueType(DAG.getDataLayout(), I.getType());
3383 EVT PtrMemVT = TLI.getMemValueType(DAG.getDataLayout(), I.getType());
3384 N = DAG.getZExtOrTrunc(N, getCurSDLoc(), PtrMemVT);
3385 N = DAG.getPtrExtOrTrunc(N, getCurSDLoc(), DestVT);
3386 setValue(&I, N);
3387}
3388
3389void SelectionDAGBuilder::visitBitCast(const User &I) {
3390 SDValue N = getValue(I.getOperand(0));
3391 SDLoc dl = getCurSDLoc();
3392 EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
3393 I.getType());
3394
3395 // BitCast assures us that source and destination are the same size so this is
3396 // either a BITCAST or a no-op.
3397 if (DestVT != N.getValueType())
3398 setValue(&I, DAG.getNode(ISD::BITCAST, dl,
3399 DestVT, N)); // convert types.
3400 // Check if the original LLVM IR Operand was a ConstantInt, because getValue()
3401 // might fold any kind of constant expression to an integer constant and that
3402 // is not what we are looking for. Only recognize a bitcast of a genuine
3403 // constant integer as an opaque constant.
3404 else if(ConstantInt *C = dyn_cast<ConstantInt>(I.getOperand(0)))
3405 setValue(&I, DAG.getConstant(C->getValue(), dl, DestVT, /*isTarget=*/false,
3406 /*isOpaque*/true));
3407 else
3408 setValue(&I, N); // noop cast.
3409}
3410
3411void SelectionDAGBuilder::visitAddrSpaceCast(const User &I) {
3412 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3413 const Value *SV = I.getOperand(0);
3414 SDValue N = getValue(SV);
3415 EVT DestVT = TLI.getValueType(DAG.getDataLayout(), I.getType());
3416
3417 unsigned SrcAS = SV->getType()->getPointerAddressSpace();
3418 unsigned DestAS = I.getType()->getPointerAddressSpace();
3419
3420 if (!TM.isNoopAddrSpaceCast(SrcAS, DestAS))
3421 N = DAG.getAddrSpaceCast(getCurSDLoc(), DestVT, N, SrcAS, DestAS);
3422
3423 setValue(&I, N);
3424}
3425
3426void SelectionDAGBuilder::visitInsertElement(const User &I) {
3427 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3428 SDValue InVec = getValue(I.getOperand(0));
3429 SDValue InVal = getValue(I.getOperand(1));
3430 SDValue InIdx = DAG.getSExtOrTrunc(getValue(I.getOperand(2)), getCurSDLoc(),
3431 TLI.getVectorIdxTy(DAG.getDataLayout()));
3432 setValue(&I, DAG.getNode(ISD::INSERT_VECTOR_ELT, getCurSDLoc(),
3433 TLI.getValueType(DAG.getDataLayout(), I.getType()),
3434 InVec, InVal, InIdx));
3435}
3436
3437void SelectionDAGBuilder::visitExtractElement(const User &I) {
3438 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3439 SDValue InVec = getValue(I.getOperand(0));
3440 SDValue InIdx = DAG.getSExtOrTrunc(getValue(I.getOperand(1)), getCurSDLoc(),
3441 TLI.getVectorIdxTy(DAG.getDataLayout()));
3442 setValue(&I, DAG.getNode(ISD::EXTRACT_VECTOR_ELT, getCurSDLoc(),
3443 TLI.getValueType(DAG.getDataLayout(), I.getType()),
3444 InVec, InIdx));
3445}
3446
3447void SelectionDAGBuilder::visitShuffleVector(const User &I) {
3448 SDValue Src1 = getValue(I.getOperand(0));
3449 SDValue Src2 = getValue(I.getOperand(1));
3450 ArrayRef<int> Mask;
3451 if (auto *SVI = dyn_cast<ShuffleVectorInst>(&I))
3452 Mask = SVI->getShuffleMask();
3453 else
3454 Mask = cast<ConstantExpr>(I).getShuffleMask();
3455 SDLoc DL = getCurSDLoc();
3456 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3457 EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
3458 EVT SrcVT = Src1.getValueType();
3459
3460 if (all_of(Mask, [](int Elem) { return Elem == 0; }) &&
3461 VT.isScalableVector()) {
3462 // Canonical splat form of first element of first input vector.
3463 SDValue FirstElt =
3464 DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, SrcVT.getScalarType(), Src1,
3465 DAG.getVectorIdxConstant(0, DL));
3466 setValue(&I, DAG.getNode(ISD::SPLAT_VECTOR, DL, VT, FirstElt));
3467 return;
3468 }
3469
3470 // For now, we only handle splats for scalable vectors.
3471 // The DAGCombiner will perform a BUILD_VECTOR -> SPLAT_VECTOR transformation
3472 // for targets that support a SPLAT_VECTOR for non-scalable vector types.
3473 assert(!VT.isScalableVector() && "Unsupported scalable vector shuffle");
3474
3475 unsigned SrcNumElts = SrcVT.getVectorNumElements();
3476 unsigned MaskNumElts = Mask.size();
3477
3478 if (SrcNumElts == MaskNumElts) {
3479 setValue(&I, DAG.getVectorShuffle(VT, DL, Src1, Src2, Mask));
3480 return;
3481 }
3482
3483 // Normalize the shuffle vector since mask and vector length don't match.
3484 if (SrcNumElts < MaskNumElts) {
3485 // Mask is longer than the source vectors. We can use concatenate vector to
3486 // make the mask and vectors lengths match.
3487
3488 if (MaskNumElts % SrcNumElts == 0) {
3489 // Mask length is a multiple of the source vector length.
3490 // Check if the shuffle is some kind of concatenation of the input
3491 // vectors.
3492 unsigned NumConcat = MaskNumElts / SrcNumElts;
3493 bool IsConcat = true;
3494 SmallVector<int, 8> ConcatSrcs(NumConcat, -1);
3495 for (unsigned i = 0; i != MaskNumElts; ++i) {
3496 int Idx = Mask[i];
3497 if (Idx < 0)
3498 continue;
3499 // Ensure the indices in each SrcVT sized piece are sequential and that
3500 // the same source is used for the whole piece.
3501 if ((Idx % SrcNumElts != (i % SrcNumElts)) ||
3502 (ConcatSrcs[i / SrcNumElts] >= 0 &&
3503 ConcatSrcs[i / SrcNumElts] != (int)(Idx / SrcNumElts))) {
3504 IsConcat = false;
3505 break;
3506 }
3507 // Remember which source this index came from.
3508 ConcatSrcs[i / SrcNumElts] = Idx / SrcNumElts;
3509 }
3510
3511 // The shuffle is concatenating multiple vectors together. Just emit
3512 // a CONCAT_VECTORS operation.
3513 if (IsConcat) {
3514 SmallVector<SDValue, 8> ConcatOps;
3515 for (auto Src : ConcatSrcs) {
3516 if (Src < 0)
3517 ConcatOps.push_back(DAG.getUNDEF(SrcVT));
3518 else if (Src == 0)
3519 ConcatOps.push_back(Src1);
3520 else
3521 ConcatOps.push_back(Src2);
3522 }
3523 setValue(&I, DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, ConcatOps));
3524 return;
3525 }
3526 }
3527
3528 unsigned PaddedMaskNumElts = alignTo(MaskNumElts, SrcNumElts);
3529 unsigned NumConcat = PaddedMaskNumElts / SrcNumElts;
3530 EVT PaddedVT = EVT::getVectorVT(*DAG.getContext(), VT.getScalarType(),
3531 PaddedMaskNumElts);
3532
3533 // Pad both vectors with undefs to make them the same length as the mask.
3534 SDValue UndefVal = DAG.getUNDEF(SrcVT);
3535
3536 SmallVector<SDValue, 8> MOps1(NumConcat, UndefVal);
3537 SmallVector<SDValue, 8> MOps2(NumConcat, UndefVal);
3538 MOps1[0] = Src1;
3539 MOps2[0] = Src2;
3540
3541 Src1 = DAG.getNode(ISD::CONCAT_VECTORS, DL, PaddedVT, MOps1);
3542 Src2 = DAG.getNode(ISD::CONCAT_VECTORS, DL, PaddedVT, MOps2);
3543
3544 // Readjust mask for new input vector length.
3545 SmallVector<int, 8> MappedOps(PaddedMaskNumElts, -1);
3546 for (unsigned i = 0; i != MaskNumElts; ++i) {
3547 int Idx = Mask[i];
3548 if (Idx >= (int)SrcNumElts)
3549 Idx -= SrcNumElts - PaddedMaskNumElts;
3550 MappedOps[i] = Idx;
3551 }
3552
3553 SDValue Result = DAG.getVectorShuffle(PaddedVT, DL, Src1, Src2, MappedOps);
3554
3555 // If the concatenated vector was padded, extract a subvector with the
3556 // correct number of elements.
3557 if (MaskNumElts != PaddedMaskNumElts)
3558 Result = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, Result,
3559 DAG.getVectorIdxConstant(0, DL));
3560
3561 setValue(&I, Result);
3562 return;
3563 }
3564
3565 if (SrcNumElts > MaskNumElts) {
3566 // Analyze the access pattern of the vector to see if we can extract
3567 // two subvectors and do the shuffle.
3568 int StartIdx[2] = { -1, -1 }; // StartIdx to extract from
3569 bool CanExtract = true;
3570 for (int Idx : Mask) {
3571 unsigned Input = 0;
3572 if (Idx < 0)
3573 continue;
3574
3575 if (Idx >= (int)SrcNumElts) {
3576 Input = 1;
3577 Idx -= SrcNumElts;
3578 }
3579
3580 // If all the indices come from the same MaskNumElts sized portion of
3581 // the sources we can use extract. Also make sure the extract wouldn't
3582 // extract past the end of the source.
3583 int NewStartIdx = alignDown(Idx, MaskNumElts);
3584 if (NewStartIdx + MaskNumElts > SrcNumElts ||
3585 (StartIdx[Input] >= 0 && StartIdx[Input] != NewStartIdx))
3586 CanExtract = false;
3587 // Make sure we always update StartIdx as we use it to track if all
3588 // elements are undef.
3589 StartIdx[Input] = NewStartIdx;
3590 }
3591
3592 if (StartIdx[0] < 0 && StartIdx[1] < 0) {
3593 setValue(&I, DAG.getUNDEF(VT)); // Vectors are not used.
3594 return;
3595 }
3596 if (CanExtract) {
3597 // Extract appropriate subvector and generate a vector shuffle
3598 for (unsigned Input = 0; Input < 2; ++Input) {
3599 SDValue &Src = Input == 0 ? Src1 : Src2;
3600 if (StartIdx[Input] < 0)
3601 Src = DAG.getUNDEF(VT);
3602 else {
3603 Src = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, Src,
3604 DAG.getVectorIdxConstant(StartIdx[Input], DL));
3605 }
3606 }
3607
3608 // Calculate new mask.
3609 SmallVector<int, 8> MappedOps(Mask.begin(), Mask.end());
3610 for (int &Idx : MappedOps) {
3611 if (Idx >= (int)SrcNumElts)
3612 Idx -= SrcNumElts + StartIdx[1] - MaskNumElts;
3613 else if (Idx >= 0)
3614 Idx -= StartIdx[0];
3615 }
3616
3617 setValue(&I, DAG.getVectorShuffle(VT, DL, Src1, Src2, MappedOps));
3618 return;
3619 }
3620 }
3621
3622 // We can't use either concat vectors or extract subvectors so fall back to
3623 // replacing the shuffle with extract and build vector.
3624 // to insert and build vector.
3625 EVT EltVT = VT.getVectorElementType();
3626 SmallVector<SDValue,8> Ops;
3627 for (int Idx : Mask) {
3628 SDValue Res;
3629
3630 if (Idx < 0) {
3631 Res = DAG.getUNDEF(EltVT);
3632 } else {
3633 SDValue &Src = Idx < (int)SrcNumElts ? Src1 : Src2;
3634 if (Idx >= (int)SrcNumElts) Idx -= SrcNumElts;
3635
3636 Res = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, EltVT, Src,
3637 DAG.getVectorIdxConstant(Idx, DL));
3638 }
3639
3640 Ops.push_back(Res);
3641 }
3642
3643 setValue(&I, DAG.getBuildVector(VT, DL, Ops));
3644}
3645
3646void SelectionDAGBuilder::visitInsertValue(const User &I) {
3647 ArrayRef<unsigned> Indices;
3648 if (const InsertValueInst *IV = dyn_cast<InsertValueInst>(&I))
3649 Indices = IV->getIndices();
3650 else
3651 Indices = cast<ConstantExpr>(&I)->getIndices();
3652
3653 const Value *Op0 = I.getOperand(0);
3654 const Value *Op1 = I.getOperand(1);
3655 Type *AggTy = I.getType();
3656 Type *ValTy = Op1->getType();
3657 bool IntoUndef = isa<UndefValue>(Op0);
3658 bool FromUndef = isa<UndefValue>(Op1);
3659
3660 unsigned LinearIndex = ComputeLinearIndex(AggTy, Indices);
3661
3662 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3663 SmallVector<EVT, 4> AggValueVTs;
3664 ComputeValueVTs(TLI, DAG.getDataLayout(), AggTy, AggValueVTs);
3665 SmallVector<EVT, 4> ValValueVTs;
3666 ComputeValueVTs(TLI, DAG.getDataLayout(), ValTy, ValValueVTs);
3667
3668 unsigned NumAggValues = AggValueVTs.size();
3669 unsigned NumValValues = ValValueVTs.size();
3670 SmallVector<SDValue, 4> Values(NumAggValues);
3671
3672 // Ignore an insertvalue that produces an empty object
3673 if (!NumAggValues) {
3674 setValue(&I, DAG.getUNDEF(MVT(MVT::Other)));
3675 return;
3676 }
3677
3678 SDValue Agg = getValue(Op0);
3679 unsigned i = 0;
3680 // Copy the beginning value(s) from the original aggregate.
3681 for (; i != LinearIndex; ++i)
3682 Values[i] = IntoUndef ? DAG.getUNDEF(AggValueVTs[i]) :
3683 SDValue(Agg.getNode(), Agg.getResNo() + i);
3684 // Copy values from the inserted value(s).
3685 if (NumValValues) {
3686 SDValue Val = getValue(Op1);
3687 for (; i != LinearIndex + NumValValues; ++i)
3688 Values[i] = FromUndef ? DAG.getUNDEF(AggValueVTs[i]) :
3689 SDValue(Val.getNode(), Val.getResNo() + i - LinearIndex);
3690 }
3691 // Copy remaining value(s) from the original aggregate.
3692 for (; i != NumAggValues; ++i)
3693 Values[i] = IntoUndef ? DAG.getUNDEF(AggValueVTs[i]) :
3694 SDValue(Agg.getNode(), Agg.getResNo() + i);
3695
3696 setValue(&I, DAG.getNode(ISD::MERGE_VALUES, getCurSDLoc(),
3697 DAG.getVTList(AggValueVTs), Values));
3698}
3699
3700void SelectionDAGBuilder::visitExtractValue(const User &I) {
3701 ArrayRef<unsigned> Indices;
3702 if (const ExtractValueInst *EV = dyn_cast<ExtractValueInst>(&I))
3703 Indices = EV->getIndices();
3704 else
3705 Indices = cast<ConstantExpr>(&I)->getIndices();
3706
3707 const Value *Op0 = I.getOperand(0);
3708 Type *AggTy = Op0->getType();
3709 Type *ValTy = I.getType();
3710 bool OutOfUndef = isa<UndefValue>(Op0);
3711
3712 unsigned LinearIndex = ComputeLinearIndex(AggTy, Indices);
3713
3714 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3715 SmallVector<EVT, 4> ValValueVTs;
3716 ComputeValueVTs(TLI, DAG.getDataLayout(), ValTy, ValValueVTs);
3717
3718 unsigned NumValValues = ValValueVTs.size();
3719
3720 // Ignore a extractvalue that produces an empty object
3721 if (!NumValValues) {
3722 setValue(&I, DAG.getUNDEF(MVT(MVT::Other)));
3723 return;
3724 }
3725
3726 SmallVector<SDValue, 4> Values(NumValValues);
3727
3728 SDValue Agg = getValue(Op0);
3729 // Copy out the selected value(s).
3730 for (unsigned i = LinearIndex; i != LinearIndex + NumValValues; ++i)
3731 Values[i - LinearIndex] =
3732 OutOfUndef ?
3733 DAG.getUNDEF(Agg.getNode()->getValueType(Agg.getResNo() + i)) :
3734 SDValue(Agg.getNode(), Agg.getResNo() + i);
3735
3736 setValue(&I, DAG.getNode(ISD::MERGE_VALUES, getCurSDLoc(),
3737 DAG.getVTList(ValValueVTs), Values));
3738}
3739
3740void SelectionDAGBuilder::visitGetElementPtr(const User &I) {
3741 Value *Op0 = I.getOperand(0);
3742 // Note that the pointer operand may be a vector of pointers. Take the scalar
3743 // element which holds a pointer.
3744 unsigned AS = Op0->getType()->getScalarType()->getPointerAddressSpace();
3745 SDValue N = getValue(Op0);
3746 SDLoc dl = getCurSDLoc();
3747 auto &TLI = DAG.getTargetLoweringInfo();
3748
3749 // Normalize Vector GEP - all scalar operands should be converted to the
3750 // splat vector.
3751 bool IsVectorGEP = I.getType()->isVectorTy();
3752 ElementCount VectorElementCount =
3753 IsVectorGEP ? cast<VectorType>(I.getType())->getElementCount()
3754 : ElementCount::getFixed(0);
3755
3756 if (IsVectorGEP && !N.getValueType().isVector()) {
3757 LLVMContext &Context = *DAG.getContext();
3758 EVT VT = EVT::getVectorVT(Context, N.getValueType(), VectorElementCount);
3759 if (VectorElementCount.isScalable())
3760 N = DAG.getSplatVector(VT, dl, N);
3761 else
3762 N = DAG.getSplatBuildVector(VT, dl, N);
3763 }
3764
3765 for (gep_type_iterator GTI = gep_type_begin(&I), E = gep_type_end(&I);
3766 GTI != E; ++GTI) {
3767 const Value *Idx = GTI.getOperand();
3768 if (StructType *StTy = GTI.getStructTypeOrNull()) {
3769 unsigned Field = cast<Constant>(Idx)->getUniqueInteger().getZExtValue();
3770 if (Field) {
3771 // N = N + Offset
3772 uint64_t Offset = DL->getStructLayout(StTy)->getElementOffset(Field);
3773
3774 // In an inbounds GEP with an offset that is nonnegative even when
3775 // interpreted as signed, assume there is no unsigned overflow.
3776 SDNodeFlags Flags;
3777 if (int64_t(Offset) >= 0 && cast<GEPOperator>(I).isInBounds())
3778 Flags.setNoUnsignedWrap(true);
3779
3780 N = DAG.getNode(ISD::ADD, dl, N.getValueType(), N,
3781 DAG.getConstant(Offset, dl, N.getValueType()), Flags);
3782 }
3783 } else {
3784 // IdxSize is the width of the arithmetic according to IR semantics.
3785 // In SelectionDAG, we may prefer to do arithmetic in a wider bitwidth
3786 // (and fix up the result later).
3787 unsigned IdxSize = DAG.getDataLayout().getIndexSizeInBits(AS);
3788 MVT IdxTy = MVT::getIntegerVT(IdxSize);
3789 TypeSize ElementSize = DL->getTypeAllocSize(GTI.getIndexedType());
3790 // We intentionally mask away the high bits here; ElementSize may not
3791 // fit in IdxTy.
3792 APInt ElementMul(IdxSize, ElementSize.getKnownMinSize());
3793 bool ElementScalable = ElementSize.isScalable();
3794
3795 // If this is a scalar constant or a splat vector of constants,
3796 // handle it quickly.
3797 const auto *C = dyn_cast<Constant>(Idx);
3798 if (C && isa<VectorType>(C->getType()))
3799 C = C->getSplatValue();
3800
3801 const auto *CI = dyn_cast_or_null<ConstantInt>(C);
3802 if (CI && CI->isZero())
3803 continue;
3804 if (CI && !ElementScalable) {
3805 APInt Offs = ElementMul * CI->getValue().sextOrTrunc(IdxSize);
3806 LLVMContext &Context = *DAG.getContext();
3807 SDValue OffsVal;
3808 if (IsVectorGEP)
3809 OffsVal = DAG.getConstant(
3810 Offs, dl, EVT::getVectorVT(Context, IdxTy, VectorElementCount));
3811 else
3812 OffsVal = DAG.getConstant(Offs, dl, IdxTy);
3813
3814 // In an inbounds GEP with an offset that is nonnegative even when
3815 // interpreted as signed, assume there is no unsigned overflow.
3816 SDNodeFlags Flags;
3817 if (Offs.isNonNegative() && cast<GEPOperator>(I).isInBounds())
3818 Flags.setNoUnsignedWrap(true);
3819
3820 OffsVal = DAG.getSExtOrTrunc(OffsVal, dl, N.getValueType());
3821
3822 N = DAG.getNode(ISD::ADD, dl, N.getValueType(), N, OffsVal, Flags);
3823 continue;
3824 }
3825
3826 // N = N + Idx * ElementMul;
3827 SDValue IdxN = getValue(Idx);
3828
3829 if (!IdxN.getValueType().isVector() && IsVectorGEP) {
3830 EVT VT = EVT::getVectorVT(*Context, IdxN.getValueType(),
3831 VectorElementCount);
3832 if (VectorElementCount.isScalable())
3833 IdxN = DAG.getSplatVector(VT, dl, IdxN);
3834 else
3835 IdxN = DAG.getSplatBuildVector(VT, dl, IdxN);
3836 }
3837
3838 // If the index is smaller or larger than intptr_t, truncate or extend
3839 // it.
3840 IdxN = DAG.getSExtOrTrunc(IdxN, dl, N.getValueType());
3841
3842 if (ElementScalable) {
3843 EVT VScaleTy = N.getValueType().getScalarType();
3844 SDValue VScale = DAG.getNode(
3845 ISD::VSCALE, dl, VScaleTy,
3846 DAG.getConstant(ElementMul.getZExtValue(), dl, VScaleTy));
3847 if (IsVectorGEP)
3848 VScale = DAG.getSplatVector(N.getValueType(), dl, VScale);
3849 IdxN = DAG.getNode(ISD::MUL, dl, N.getValueType(), IdxN, VScale);
3850 } else {
3851 // If this is a multiply by a power of two, turn it into a shl
3852 // immediately. This is a very common case.
3853 if (ElementMul != 1) {
3854 if (ElementMul.isPowerOf2()) {
3855 unsigned Amt = ElementMul.logBase2();
3856 IdxN = DAG.getNode(ISD::SHL, dl,
3857 N.getValueType(), IdxN,
3858 DAG.getConstant(Amt, dl, IdxN.getValueType()));
3859 } else {
3860 SDValue Scale = DAG.getConstant(ElementMul.getZExtValue(), dl,
3861 IdxN.getValueType());
3862 IdxN = DAG.getNode(ISD::MUL, dl,
3863 N.getValueType(), IdxN, Scale);
3864 }
3865 }
3866 }
3867
3868 N = DAG.getNode(ISD::ADD, dl,
3869 N.getValueType(), N, IdxN);
3870 }
3871 }
3872
3873 MVT PtrTy = TLI.getPointerTy(DAG.getDataLayout(), AS);
3874 MVT PtrMemTy = TLI.getPointerMemTy(DAG.getDataLayout(), AS);
3875 if (IsVectorGEP) {
3876 PtrTy = MVT::getVectorVT(PtrTy, VectorElementCount);
3877 PtrMemTy = MVT::getVectorVT(PtrMemTy, VectorElementCount);
3878 }
3879
3880 if (PtrMemTy != PtrTy && !cast<GEPOperator>(I).isInBounds())
3881 N = DAG.getPtrExtendInReg(N, dl, PtrMemTy);
3882
3883 setValue(&I, N);
3884}
3885
3886void SelectionDAGBuilder::visitAlloca(const AllocaInst &I) {
3887 // If this is a fixed sized alloca in the entry block of the function,
3888 // allocate it statically on the stack.
3889 if (FuncInfo.StaticAllocaMap.count(&I))
3890 return; // getValue will auto-populate this.
3891
3892 SDLoc dl = getCurSDLoc();
3893 Type *Ty = I.getAllocatedType();
3894 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3895 auto &DL = DAG.getDataLayout();
3896 uint64_t TySize = DL.getTypeAllocSize(Ty);
3897 MaybeAlign Alignment = std::max(DL.getPrefTypeAlign(Ty), I.getAlign());
3898
3899 SDValue AllocSize = getValue(I.getArraySize());
3900
3901 EVT IntPtr = TLI.getPointerTy(DAG.getDataLayout(), DL.getAllocaAddrSpace());
3902 if (AllocSize.getValueType() != IntPtr)
3903 AllocSize = DAG.getZExtOrTrunc(AllocSize, dl, IntPtr);
3904
3905 AllocSize = DAG.getNode(ISD::MUL, dl, IntPtr,
3906 AllocSize,
3907 DAG.getConstant(TySize, dl, IntPtr));
3908
3909 // Handle alignment. If the requested alignment is less than or equal to
3910 // the stack alignment, ignore it. If the size is greater than or equal to
3911 // the stack alignment, we note this in the DYNAMIC_STACKALLOC node.
3912 Align StackAlign = DAG.getSubtarget().getFrameLowering()->getStackAlign();
3913 if (*Alignment <= StackAlign)
3914 Alignment = None;
3915
3916 const uint64_t StackAlignMask = StackAlign.value() - 1U;
3917 // Round the size of the allocation up to the stack alignment size
3918 // by add SA-1 to the size. This doesn't overflow because we're computing
3919 // an address inside an alloca.
3920 SDNodeFlags Flags;
3921 Flags.setNoUnsignedWrap(true);
3922 AllocSize = DAG.getNode(ISD::ADD, dl, AllocSize.getValueType(), AllocSize,
3923 DAG.getConstant(StackAlignMask, dl, IntPtr), Flags);
3924
3925 // Mask out the low bits for alignment purposes.
3926 AllocSize = DAG.getNode(ISD::AND, dl, AllocSize.getValueType(), AllocSize,
3927 DAG.getConstant(~StackAlignMask, dl, IntPtr));
3928
3929 SDValue Ops[] = {
3930 getRoot(), AllocSize,
3931 DAG.getConstant(Alignment ? Alignment->value() : 0, dl, IntPtr)};
3932 SDVTList VTs = DAG.getVTList(AllocSize.getValueType(), MVT::Other);
3933 SDValue DSA = DAG.getNode(ISD::DYNAMIC_STACKALLOC, dl, VTs, Ops);
3934 setValue(&I, DSA);
3935 DAG.setRoot(DSA.getValue(1));
3936
3937 assert(FuncInfo.MF->getFrameInfo().hasVarSizedObjects());
3938}
3939
3940void SelectionDAGBuilder::visitLoad(const LoadInst &I) {
3941 if (I.isAtomic())
3942 return visitAtomicLoad(I);
3943
3944 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3945 const Value *SV = I.getOperand(0);
3946 if (TLI.supportSwiftError()) {
3947 // Swifterror values can come from either a function parameter with
3948 // swifterror attribute or an alloca with swifterror attribute.
3949 if (const Argument *Arg = dyn_cast<Argument>(SV)) {
3950 if (Arg->hasSwiftErrorAttr())
3951 return visitLoadFromSwiftError(I);
3952 }
3953
3954 if (const AllocaInst *Alloca = dyn_cast<AllocaInst>(SV)) {
3955 if (Alloca->isSwiftError())
3956 return visitLoadFromSwiftError(I);
3957 }
3958 }
3959
3960 SDValue Ptr = getValue(SV);
3961
3962 Type *Ty = I.getType();
3963 Align Alignment = I.getAlign();
3964
3965 AAMDNodes AAInfo;
3966 I.getAAMetadata(AAInfo);
3967 const MDNode *Ranges = I.getMetadata(LLVMContext::MD_range);
3968
3969 SmallVector<EVT, 4> ValueVTs, MemVTs;
3970 SmallVector<uint64_t, 4> Offsets;
3971 ComputeValueVTs(TLI, DAG.getDataLayout(), Ty, ValueVTs, &MemVTs, &Offsets);
3972 unsigned NumValues = ValueVTs.size();
3973 if (NumValues == 0)
3974 return;
3975
3976 bool isVolatile = I.isVolatile();
3977
3978 SDValue Root;
3979 bool ConstantMemory = false;
3980 if (isVolatile)
3981 // Serialize volatile loads with other side effects.
3982 Root = getRoot();
3983 else if (NumValues > MaxParallelChains)
3984 Root = getMemoryRoot();
3985 else if (AA &&
3986 AA->pointsToConstantMemory(MemoryLocation(
3987 SV,
3988 LocationSize::precise(DAG.getDataLayout().getTypeStoreSize(Ty)),
3989 AAInfo))) {
3990 // Do not serialize (non-volatile) loads of constant memory with anything.
3991 Root = DAG.getEntryNode();
3992 ConstantMemory = true;
3993 } else {
3994 // Do not serialize non-volatile loads against each other.
3995 Root = DAG.getRoot();
3996 }
3997
3998 SDLoc dl = getCurSDLoc();
3999
4000 if (isVolatile)
4001 Root = TLI.prepareVolatileOrAtomicLoad(Root, dl, DAG);
4002
4003 // An aggregate load cannot wrap around the address space, so offsets to its
4004 // parts don't wrap either.
4005 SDNodeFlags Flags;
4006 Flags.setNoUnsignedWrap(true);
4007
4008 SmallVector<SDValue, 4> Values(NumValues);
4009 SmallVector<SDValue, 4> Chains(std::min(MaxParallelChains, NumValues));
4010 EVT PtrVT = Ptr.getValueType();
4011
4012 MachineMemOperand::Flags MMOFlags
4013 = TLI.getLoadMemOperandFlags(I, DAG.getDataLayout());
4014
4015 unsigned ChainI = 0;
4016 for (unsigned i = 0; i != NumValues; ++i, ++ChainI) {
4017 // Serializing loads here may result in excessive register pressure, and
4018 // TokenFactor places arbitrary choke points on the scheduler. SD scheduling
4019 // could recover a bit by hoisting nodes upward in the chain by recognizing
4020 // they are side-effect free or do not alias. The optimizer should really
4021 // avoid this case by converting large object/array copies to llvm.memcpy
4022 // (MaxParallelChains should always remain as failsafe).
4023 if (ChainI == MaxParallelChains) {
4024 assert(PendingLoads.empty() && "PendingLoads must be serialized first");
4025 SDValue Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
4026 makeArrayRef(Chains.data(), ChainI));
4027 Root = Chain;
4028 ChainI = 0;
4029 }
4030 SDValue A = DAG.getNode(ISD::ADD, dl,
4031 PtrVT, Ptr,
4032 DAG.getConstant(Offsets[i], dl, PtrVT),
4033 Flags);
4034
4035 SDValue L = DAG.getLoad(MemVTs[i], dl, Root, A,
4036 MachinePointerInfo(SV, Offsets[i]), Alignment,
4037 MMOFlags, AAInfo, Ranges);
4038 Chains[ChainI] = L.getValue(1);
4039
4040 if (MemVTs[i] != ValueVTs[i])
4041 L = DAG.getZExtOrTrunc(L, dl, ValueVTs[i]);
4042
4043 Values[i] = L;
4044 }
4045
4046 if (!ConstantMemory) {
4047 SDValue Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
4048 makeArrayRef(Chains.data(), ChainI));
4049 if (isVolatile)
4050 DAG.setRoot(Chain);
4051 else
4052 PendingLoads.push_back(Chain);
4053 }
4054
4055 setValue(&I, DAG.getNode(ISD::MERGE_VALUES, dl,
4056 DAG.getVTList(ValueVTs), Values));
4057}
4058
4059void SelectionDAGBuilder::visitStoreToSwiftError(const StoreInst &I) {
4060 assert(DAG.getTargetLoweringInfo().supportSwiftError() &&
4061 "call visitStoreToSwiftError when backend supports swifterror");
4062
4063 SmallVector<EVT, 4> ValueVTs;
4064 SmallVector<uint64_t, 4> Offsets;
4065 const Value *SrcV = I.getOperand(0);
4066 ComputeValueVTs(DAG.getTargetLoweringInfo(), DAG.getDataLayout(),
4067 SrcV->getType(), ValueVTs, &Offsets);
4068 assert(ValueVTs.size() == 1 && Offsets[0] == 0 &&
4069 "expect a single EVT for swifterror");
4070
4071 SDValue Src = getValue(SrcV);
4072 // Create a virtual register, then update the virtual register.
4073 Register VReg =
4074 SwiftError.getOrCreateVRegDefAt(&I, FuncInfo.MBB, I.getPointerOperand());
4075 // Chain, DL, Reg, N or Chain, DL, Reg, N, Glue
4076 // Chain can be getRoot or getControlRoot.
4077 SDValue CopyNode = DAG.getCopyToReg(getRoot(), getCurSDLoc(), VReg,
4078 SDValue(Src.getNode(), Src.getResNo()));
4079 DAG.setRoot(CopyNode);
4080}
4081
4082void SelectionDAGBuilder::visitLoadFromSwiftError(const LoadInst &I) {
4083 assert(DAG.getTargetLoweringInfo().supportSwiftError() &&
4084 "call visitLoadFromSwiftError when backend supports swifterror");
4085
4086 assert(!I.isVolatile() &&
4087 !I.hasMetadata(LLVMContext::MD_nontemporal) &&
4088 !I.hasMetadata(LLVMContext::MD_invariant_load) &&
4089 "Support volatile, non temporal, invariant for load_from_swift_error");
4090
4091 const Value *SV = I.getOperand(0);
4092 Type *Ty = I.getType();
4093 AAMDNodes AAInfo;
4094 I.getAAMetadata(AAInfo);
4095 assert(
4096 (!AA ||
4097 !AA->pointsToConstantMemory(MemoryLocation(
4098 SV, LocationSize::precise(DAG.getDataLayout().getTypeStoreSize(Ty)),
4099 AAInfo))) &&
4100 "load_from_swift_error should not be constant memory");
4101
4102 SmallVector<EVT, 4> ValueVTs;
4103 SmallVector<uint64_t, 4> Offsets;
4104 ComputeValueVTs(DAG.getTargetLoweringInfo(), DAG.getDataLayout(), Ty,
4105 ValueVTs, &Offsets);
4106 assert(ValueVTs.size() == 1 && Offsets[0] == 0 &&
4107 "expect a single EVT for swifterror");
4108
4109 // Chain, DL, Reg, VT, Glue or Chain, DL, Reg, VT
4110 SDValue L = DAG.getCopyFromReg(
4111 getRoot(), getCurSDLoc(),
4112 SwiftError.getOrCreateVRegUseAt(&I, FuncInfo.MBB, SV), ValueVTs[0]);
4113
4114 setValue(&I, L);
4115}
4116
4117void SelectionDAGBuilder::visitStore(const StoreInst &I) {
4118 if (I.isAtomic())
4119 return visitAtomicStore(I);
4120
4121 const Value *SrcV = I.getOperand(0);
4122 const Value *PtrV = I.getOperand(1);
4123
4124 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4125 if (TLI.supportSwiftError()) {
4126 // Swifterror values can come from either a function parameter with
4127 // swifterror attribute or an alloca with swifterror attribute.
4128 if (const Argument *Arg = dyn_cast<Argument>(PtrV)) {
4129 if (Arg->hasSwiftErrorAttr())
4130 return visitStoreToSwiftError(I);
4131 }
4132
4133 if (const AllocaInst *Alloca = dyn_cast<AllocaInst>(PtrV)) {
4134 if (Alloca->isSwiftError())
4135 return visitStoreToSwiftError(I);
4136 }
4137 }
4138
4139 SmallVector<EVT, 4> ValueVTs, MemVTs;
4140 SmallVector<uint64_t, 4> Offsets;
4141 ComputeValueVTs(DAG.getTargetLoweringInfo(), DAG.getDataLayout(),
4142 SrcV->getType(), ValueVTs, &MemVTs, &Offsets);
4143 unsigned NumValues = ValueVTs.size();
4144 if (NumValues == 0)
4145 return;
4146
4147 // Get the lowered operands. Note that we do this after
4148 // checking if NumResults is zero, because with zero results
4149 // the operands won't have values in the map.
4150 SDValue Src = getValue(SrcV);
4151 SDValue Ptr = getValue(PtrV);
4152
4153 SDValue Root = I.isVolatile() ? getRoot() : getMemoryRoot();
4154 SmallVector<SDValue, 4> Chains(std::min(MaxParallelChains, NumValues));
4155 SDLoc dl = getCurSDLoc();
4156 Align Alignment = I.getAlign();
4157 AAMDNodes AAInfo;
4158 I.getAAMetadata(AAInfo);
4159
4160 auto MMOFlags = TLI.getStoreMemOperandFlags(I, DAG.getDataLayout());
4161
4162 // An aggregate load cannot wrap around the address space, so offsets to its
4163 // parts don't wrap either.
4164 SDNodeFlags Flags;
4165 Flags.setNoUnsignedWrap(true);
4166
4167 unsigned ChainI = 0;
4168 for (unsigned i = 0; i != NumValues; ++i, ++ChainI) {
4169 // See visitLoad comments.
4170 if (ChainI == MaxParallelChains) {
4171 SDValue Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
4172 makeArrayRef(Chains.data(), ChainI));
4173 Root = Chain;
4174 ChainI = 0;
4175 }
4176 SDValue Add =
4177 DAG.getMemBasePlusOffset(Ptr, TypeSize::Fixed(Offsets[i]), dl, Flags);
4178 SDValue Val = SDValue(Src.getNode(), Src.getResNo() + i);
4179 if (MemVTs[i] != ValueVTs[i])
4180 Val = DAG.getPtrExtOrTrunc(Val, dl, MemVTs[i]);
4181 SDValue St =
4182 DAG.getStore(Root, dl, Val, Add, MachinePointerInfo(PtrV, Offsets[i]),
4183 Alignment, MMOFlags, AAInfo);
4184 Chains[ChainI] = St;
4185 }
4186
4187 SDValue StoreNode = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
4188 makeArrayRef(Chains.data(), ChainI));
4189 DAG.setRoot(StoreNode);
4190}
4191
4192void SelectionDAGBuilder::visitMaskedStore(const CallInst &I,
4193 bool IsCompressing) {
4194 SDLoc sdl = getCurSDLoc();
4195
4196 auto getMaskedStoreOps = [&](Value *&Ptr, Value *&Mask, Value *&Src0,
4197 MaybeAlign &Alignment) {
4198 // llvm.masked.store.*(Src0, Ptr, alignment, Mask)
4199 Src0 = I.getArgOperand(0);
4200 Ptr = I.getArgOperand(1);
4201 Alignment = cast<ConstantInt>(I.getArgOperand(2))->getMaybeAlignValue();
4202 Mask = I.getArgOperand(3);
4203 };
4204 auto getCompressingStoreOps = [&](Value *&Ptr, Value *&Mask, Value *&Src0,
4205 MaybeAlign &Alignment) {
4206 // llvm.masked.compressstore.*(Src0, Ptr, Mask)
4207 Src0 = I.getArgOperand(0);
4208 Ptr = I.getArgOperand(1);
4209 Mask = I.getArgOperand(2);
4210 Alignment = None;
4211 };
4212
4213 Value *PtrOperand, *MaskOperand, *Src0Operand;
4214 MaybeAlign Alignment;
4215 if (IsCompressing)
4216 getCompressingStoreOps(PtrOperand, MaskOperand, Src0Operand, Alignment);
4217 else
4218 getMaskedStoreOps(PtrOperand, MaskOperand, Src0Operand, Alignment);
4219
4220 SDValue Ptr = getValue(PtrOperand);
4221 SDValue Src0 = getValue(Src0Operand);
4222 SDValue Mask = getValue(MaskOperand);
4223 SDValue Offset = DAG.getUNDEF(Ptr.getValueType());
4224
4225 EVT VT = Src0.getValueType();
4226 if (!Alignment)
4227 Alignment = DAG.getEVTAlign(VT);
4228
4229 AAMDNodes AAInfo;
4230 I.getAAMetadata(AAInfo);
4231
4232 MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
4233 MachinePointerInfo(PtrOperand), MachineMemOperand::MOStore,
4234 // TODO: Make MachineMemOperands aware of scalable
4235 // vectors.
4236 VT.getStoreSize().getKnownMinSize(), *Alignment, AAInfo);
4237 SDValue StoreNode =
4238 DAG.getMaskedStore(getMemoryRoot(), sdl, Src0, Ptr, Offset, Mask, VT, MMO,
4239 ISD::UNINDEXED, false /* Truncating */, IsCompressing);
4240 DAG.setRoot(StoreNode);
4241 setValue(&I, StoreNode);
4242}
4243
4244// Get a uniform base for the Gather/Scatter intrinsic.
4245// The first argument of the Gather/Scatter intrinsic is a vector of pointers.
4246// We try to represent it as a base pointer + vector of indices.
4247// Usually, the vector of pointers comes from a 'getelementptr' instruction.
4248// The first operand of the GEP may be a single pointer or a vector of pointers
4249// Example:
4250// %gep.ptr = getelementptr i32, <8 x i32*> %vptr, <8 x i32> %ind
4251// or
4252// %gep.ptr = getelementptr i32, i32* %ptr, <8 x i32> %ind
4253// %res = call <8 x i32> @llvm.masked.gather.v8i32(<8 x i32*> %gep.ptr, ..
4254//
4255// When the first GEP operand is a single pointer - it is the uniform base we
4256// are looking for. If first operand of the GEP is a splat vector - we
4257// extract the splat value and use it as a uniform base.
4258// In all other cases the function returns 'false'.
4259static bool getUniformBase(const Value *Ptr, SDValue &Base, SDValue &Index,
4260 ISD::MemIndexType &IndexType, SDValue &Scale,
4261 SelectionDAGBuilder *SDB, const BasicBlock *CurBB) {
4262 SelectionDAG& DAG = SDB->DAG;
4263 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4264 const DataLayout &DL = DAG.getDataLayout();
4265
4266 assert(Ptr->getType()->isVectorTy() && "Uexpected pointer type");
4267
4268 // Handle splat constant pointer.
4269 if (auto *C = dyn_cast<Constant>(Ptr)) {
4270 C = C->getSplatValue();
4271 if (!C)
4272 return false;
4273
4274 Base = SDB->getValue(C);
4275
4276 unsigned NumElts = cast<FixedVectorType>(Ptr->getType())->getNumElements();
4277 EVT VT = EVT::getVectorVT(*DAG.getContext(), TLI.getPointerTy(DL), NumElts);
4278 Index = DAG.getConstant(0, SDB->getCurSDLoc(), VT);
4279 IndexType = ISD::SIGNED_SCALED;
4280 Scale = DAG.getTargetConstant(1, SDB->getCurSDLoc(), TLI.getPointerTy(DL));
4281 return true;
4282 }
4283
4284 const GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Ptr);
4285 if (!GEP || GEP->getParent() != CurBB)
4286 return false;
4287
4288 if (GEP->getNumOperands() != 2)
4289 return false;
4290
4291 const Value *BasePtr = GEP->getPointerOperand();
4292 const Value *IndexVal = GEP->getOperand(GEP->getNumOperands() - 1);
4293
4294 // Make sure the base is scalar and the index is a vector.
4295 if (BasePtr->getType()->isVectorTy() || !IndexVal->getType()->isVectorTy())
4296 return false;
4297
4298 Base = SDB->getValue(BasePtr);
4299 Index = SDB->getValue(IndexVal);
4300 IndexType = ISD::SIGNED_SCALED;
4301 Scale = DAG.getTargetConstant(
4302 DL.getTypeAllocSize(GEP->getResultElementType()),
4303 SDB->getCurSDLoc(), TLI.getPointerTy(DL));
4304 return true;
4305}
4306
4307void SelectionDAGBuilder::visitMaskedScatter(const CallInst &I) {
4308 SDLoc sdl = getCurSDLoc();
4309
4310 // llvm.masked.scatter.*(Src0, Ptrs, alignment, Mask)
4311 const Value *Ptr = I.getArgOperand(1);
4312 SDValue Src0 = getValue(I.getArgOperand(0));
4313 SDValue Mask = getValue(I.getArgOperand(3));
4314 EVT VT = Src0.getValueType();
4315 Align Alignment = cast<ConstantInt>(I.getArgOperand(2))
4316 ->getMaybeAlignValue()
4317 .getValueOr(DAG.getEVTAlign(VT));
4318 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4319
4320 AAMDNodes AAInfo;
4321 I.getAAMetadata(AAInfo);
4322
4323 SDValue Base;
4324 SDValue Index;
4325 ISD::MemIndexType IndexType;
4326 SDValue Scale;
4327 bool UniformBase = getUniformBase(Ptr, Base, Index, IndexType, Scale, this,
4328 I.getParent());
4329
4330 unsigned AS = Ptr->getType()->getScalarType()->getPointerAddressSpace();
4331 MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
4332 MachinePointerInfo(AS), MachineMemOperand::MOStore,
4333 // TODO: Make MachineMemOperands aware of scalable
4334 // vectors.
4335 MemoryLocation::UnknownSize, Alignment, AAInfo);
4336 if (!UniformBase) {
4337 Base = DAG.getConstant(0, sdl, TLI.getPointerTy(DAG.getDataLayout()));
4338 Index = getValue(Ptr);
4339 IndexType = ISD::SIGNED_UNSCALED;
4340 Scale = DAG.getTargetConstant(1, sdl, TLI.getPointerTy(DAG.getDataLayout()));
4341 }
4342 SDValue Ops[] = { getMemoryRoot(), Src0, Mask, Base, Index, Scale };
4343 SDValue Scatter = DAG.getMaskedScatter(DAG.getVTList(MVT::Other), VT, sdl,
4344 Ops, MMO, IndexType, false);
4345 DAG.setRoot(Scatter);
4346 setValue(&I, Scatter);
4347}
4348
4349void SelectionDAGBuilder::visitMaskedLoad(const CallInst &I, bool IsExpanding) {
4350 SDLoc sdl = getCurSDLoc();
4351
4352 auto getMaskedLoadOps = [&](Value *&Ptr, Value *&Mask, Value *&Src0,
4353 MaybeAlign &Alignment) {
4354 // @llvm.masked.load.*(Ptr, alignment, Mask, Src0)
4355 Ptr = I.getArgOperand(0);
4356 Alignment = cast<ConstantInt>(I.getArgOperand(1))->getMaybeAlignValue();
4357 Mask = I.getArgOperand(2);
4358 Src0 = I.getArgOperand(3);
4359 };
4360 auto getExpandingLoadOps = [&](Value *&Ptr, Value *&Mask, Value *&Src0,
4361 MaybeAlign &Alignment) {
4362 // @llvm.masked.expandload.*(Ptr, Mask, Src0)
4363 Ptr = I.getArgOperand(0);
4364 Alignment = None;
4365 Mask = I.getArgOperand(1);
4366 Src0 = I.getArgOperand(2);
4367 };
4368
4369 Value *PtrOperand, *MaskOperand, *Src0Operand;
4370 MaybeAlign Alignment;
4371 if (IsExpanding)
4372 getExpandingLoadOps(PtrOperand, MaskOperand, Src0Operand, Alignment);
4373 else
4374 getMaskedLoadOps(PtrOperand, MaskOperand, Src0Operand, Alignment);
4375
4376 SDValue Ptr = getValue(PtrOperand);
4377 SDValue Src0 = getValue(Src0Operand);
4378 SDValue Mask = getValue(MaskOperand);
4379 SDValue Offset = DAG.getUNDEF(Ptr.getValueType());
4380
4381 EVT VT = Src0.getValueType();
4382 if (!Alignment)
4383 Alignment = DAG.getEVTAlign(VT);
4384
4385 AAMDNodes AAInfo;
4386 I.getAAMetadata(AAInfo);
4387 const MDNode *Ranges = I.getMetadata(LLVMContext::MD_range);
4388
4389 // Do not serialize masked loads of constant memory with anything.
4390 MemoryLocation ML;
4391 if (VT.isScalableVector())
4392 ML = MemoryLocation::getAfter(PtrOperand);
4393 else
4394 ML = MemoryLocation(PtrOperand, LocationSize::precise(
4395 DAG.getDataLayout().getTypeStoreSize(I.getType())),
4396 AAInfo);
4397 bool AddToChain = !AA || !AA->pointsToConstantMemory(ML);
4398
4399 SDValue InChain = AddToChain ? DAG.getRoot() : DAG.getEntryNode();
4400
4401 MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
4402 MachinePointerInfo(PtrOperand), MachineMemOperand::MOLoad,
4403 // TODO: Make MachineMemOperands aware of scalable
4404 // vectors.
4405 VT.getStoreSize().getKnownMinSize(), *Alignment, AAInfo, Ranges);
4406
4407 SDValue Load =
4408 DAG.getMaskedLoad(VT, sdl, InChain, Ptr, Offset, Mask, Src0, VT, MMO,
4409 ISD::UNINDEXED, ISD::NON_EXTLOAD, IsExpanding);
4410 if (AddToChain)
4411 PendingLoads.push_back(Load.getValue(1));
4412 setValue(&I, Load);
4413}
4414
4415void SelectionDAGBuilder::visitMaskedGather(const CallInst &I) {
4416 SDLoc sdl = getCurSDLoc();
4417
4418 // @llvm.masked.gather.*(Ptrs, alignment, Mask, Src0)
4419 const Value *Ptr = I.getArgOperand(0);
4420 SDValue Src0 = getValue(I.getArgOperand(3));
4421 SDValue Mask = getValue(I.getArgOperand(2));
4422
4423 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4424 EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
4425 Align Alignment = cast<ConstantInt>(I.getArgOperand(1))
4426 ->getMaybeAlignValue()
4427 .getValueOr(DAG.getEVTAlign(VT));
4428
4429 AAMDNodes AAInfo;
4430 I.getAAMetadata(AAInfo);
4431 const MDNode *Ranges = I.getMetadata(LLVMContext::MD_range);
4432
4433 SDValue Root = DAG.getRoot();
4434 SDValue Base;
4435 SDValue Index;
4436 ISD::MemIndexType IndexType;
4437 SDValue Scale;
4438 bool UniformBase = getUniformBase(Ptr, Base, Index, IndexType, Scale, this,
4439 I.getParent());
4440 unsigned AS = Ptr->getType()->getScalarType()->getPointerAddressSpace();
4441 MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
4442 MachinePointerInfo(AS), MachineMemOperand::MOLoad,
4443 // TODO: Make MachineMemOperands aware of scalable
4444 // vectors.
4445 MemoryLocation::UnknownSize, Alignment, AAInfo, Ranges);
4446
4447 if (!UniformBase) {
4448 Base = DAG.getConstant(0, sdl, TLI.getPointerTy(DAG.getDataLayout()));
4449 Index = getValue(Ptr);
4450 IndexType = ISD::SIGNED_UNSCALED;
4451 Scale = DAG.getTargetConstant(1, sdl, TLI.getPointerTy(DAG.getDataLayout()));
4452 }
4453 SDValue Ops[] = { Root, Src0, Mask, Base, Index, Scale };
4454 SDValue Gather = DAG.getMaskedGather(DAG.getVTList(VT, MVT::Other), VT, sdl,
4455 Ops, MMO, IndexType, ISD::NON_EXTLOAD);
4456
4457 PendingLoads.push_back(Gather.getValue(1));
4458 setValue(&I, Gather);
4459}
4460
4461void SelectionDAGBuilder::visitAtomicCmpXchg(const AtomicCmpXchgInst &I) {
4462 SDLoc dl = getCurSDLoc();
4463 AtomicOrdering SuccessOrdering = I.getSuccessOrdering();
4464 AtomicOrdering FailureOrdering = I.getFailureOrdering();
4465 SyncScope::ID SSID = I.getSyncScopeID();
4466
4467 SDValue InChain = getRoot();
4468
4469 MVT MemVT = getValue(I.getCompareOperand()).getSimpleValueType();
4470 SDVTList VTs = DAG.getVTList(MemVT, MVT::i1, MVT::Other);
4471
4472 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4473 auto Flags = TLI.getAtomicMemOperandFlags(I, DAG.getDataLayout());
4474
4475 MachineFunction &MF = DAG.getMachineFunction();
4476 MachineMemOperand *MMO = MF.getMachineMemOperand(
4477 MachinePointerInfo(I.getPointerOperand()), Flags, MemVT.getStoreSize(),
4478 DAG.getEVTAlign(MemVT), AAMDNodes(), nullptr, SSID, SuccessOrdering,
4479 FailureOrdering);
4480
4481 SDValue L = DAG.getAtomicCmpSwap(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS,
4482 dl, MemVT, VTs, InChain,
4483 getValue(I.getPointerOperand()),
4484 getValue(I.getCompareOperand()),
4485 getValue(I.getNewValOperand()), MMO);
4486
4487 SDValue OutChain = L.getValue(2);
4488
4489 setValue(&I, L);
4490 DAG.setRoot(OutChain);
4491}
4492
4493void SelectionDAGBuilder::visitAtomicRMW(const AtomicRMWInst &I) {
4494 SDLoc dl = getCurSDLoc();
4495 ISD::NodeType NT;
4496 switch (I.getOperation()) {
4497 default: llvm_unreachable("Unknown atomicrmw operation");
4498 case AtomicRMWInst::Xchg: NT = ISD::ATOMIC_SWAP; break;
4499 case AtomicRMWInst::Add: NT = ISD::ATOMIC_LOAD_ADD; break;
4500 case AtomicRMWInst::Sub: NT = ISD::ATOMIC_LOAD_SUB; break;
4501 case AtomicRMWInst::And: NT = ISD::ATOMIC_LOAD_AND; break;
4502 case AtomicRMWInst::Nand: NT = ISD::ATOMIC_LOAD_NAND; break;
4503 case AtomicRMWInst::Or: NT = ISD::ATOMIC_LOAD_OR; break;
4504 case AtomicRMWInst::Xor: NT = ISD::ATOMIC_LOAD_XOR; break;
4505 case AtomicRMWInst::Max: NT = ISD::ATOMIC_LOAD_MAX; break;
4506 case AtomicRMWInst::Min: NT = ISD::ATOMIC_LOAD_MIN; break;
4507 case AtomicRMWInst::UMax: NT = ISD::ATOMIC_LOAD_UMAX; break;
4508 case AtomicRMWInst::UMin: NT = ISD::ATOMIC_LOAD_UMIN; break;
4509 case AtomicRMWInst::FAdd: NT = ISD::ATOMIC_LOAD_FADD; break;
4510 case AtomicRMWInst::FSub: NT = ISD::ATOMIC_LOAD_FSUB; break;
4511 }
4512 AtomicOrdering Ordering = I.getOrdering();
4513 SyncScope::ID SSID = I.getSyncScopeID();
4514
4515 SDValue InChain = getRoot();
4516
4517 auto MemVT = getValue(I.getValOperand()).getSimpleValueType();
4518 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4519 auto Flags = TLI.getAtomicMemOperandFlags(I, DAG.getDataLayout());
4520
4521 MachineFunction &MF = DAG.getMachineFunction();
4522 MachineMemOperand *MMO = MF.getMachineMemOperand(
4523 MachinePointerInfo(I.getPointerOperand()), Flags, MemVT.getStoreSize(),
4524 DAG.getEVTAlign(MemVT), AAMDNodes(), nullptr, SSID, Ordering);
4525
4526 SDValue L =
4527 DAG.getAtomic(NT, dl, MemVT, InChain,
4528 getValue(I.getPointerOperand()), getValue(I.getValOperand()),
4529 MMO);
4530
4531 SDValue OutChain = L.getValue(1);
4532
4533 setValue(&I, L);
4534 DAG.setRoot(OutChain);
4535}
4536
4537void SelectionDAGBuilder::visitFence(const FenceInst &I) {
4538 SDLoc dl = getCurSDLoc();
4539 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4540 SDValue Ops[3];
4541 Ops[0] = getRoot();
4542 Ops[1] = DAG.getTargetConstant((unsigned)I.getOrdering(), dl,
4543 TLI.getFenceOperandTy(DAG.getDataLayout()));
4544 Ops[2] = DAG.getTargetConstant(I.getSyncScopeID(), dl,
4545 TLI.getFenceOperandTy(DAG.getDataLayout()));
4546 DAG.setRoot(DAG.getNode(ISD::ATOMIC_FENCE, dl, MVT::Other, Ops));
4547}
4548
4549void SelectionDAGBuilder::visitAtomicLoad(const LoadInst &I) {
4550 SDLoc dl = getCurSDLoc();
4551 AtomicOrdering Order = I.getOrdering();
4552 SyncScope::ID SSID = I.getSyncScopeID();
4553
4554 SDValue InChain = getRoot();
4555
4556 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4557 EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
4558 EVT MemVT = TLI.getMemValueType(DAG.getDataLayout(), I.getType());
4559
4560 if (!TLI.supportsUnalignedAtomics() &&
4561 I.getAlignment() < MemVT.getSizeInBits() / 8)
4562 report_fatal_error("Cannot generate unaligned atomic load");
4563
4564 auto Flags = TLI.getLoadMemOperandFlags(I, DAG.getDataLayout());
4565
4566 MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
4567 MachinePointerInfo(I.getPointerOperand()), Flags, MemVT.getStoreSize(),
4568 I.getAlign(), AAMDNodes(), nullptr, SSID, Order);
4569
4570 InChain = TLI.prepareVolatileOrAtomicLoad(InChain, dl, DAG);
4571
4572 SDValue Ptr = getValue(I.getPointerOperand());
4573
4574 if (TLI.lowerAtomicLoadAsLoadSDNode(I)) {
4575 // TODO: Once this is better exercised by tests, it should be merged with
4576 // the normal path for loads to prevent future divergence.
4577 SDValue L = DAG.getLoad(MemVT, dl, InChain, Ptr, MMO);
4578 if (MemVT != VT)
4579 L = DAG.getPtrExtOrTrunc(L, dl, VT);
4580
4581 setValue(&I, L);
4582 SDValue OutChain = L.getValue(1);
4583 if (!I.isUnordered())
4584 DAG.setRoot(OutChain);
4585 else
4586 PendingLoads.push_back(OutChain);
4587 return;
4588 }
4589
4590 SDValue L = DAG.getAtomic(ISD::ATOMIC_LOAD, dl, MemVT, MemVT, InChain,
4591 Ptr, MMO);
4592
4593 SDValue OutChain = L.getValue(1);
4594 if (MemVT != VT)
4595 L = DAG.getPtrExtOrTrunc(L, dl, VT);
4596
4597 setValue(&I, L);
4598 DAG.setRoot(OutChain);
4599}
4600
4601void SelectionDAGBuilder::visitAtomicStore(const StoreInst &I) {
4602 SDLoc dl = getCurSDLoc();
4603
4604 AtomicOrdering Ordering = I.getOrdering();
4605 SyncScope::ID SSID = I.getSyncScopeID();
4606
4607 SDValue InChain = getRoot();
4608
4609 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4610 EVT MemVT =
4611 TLI.getMemValueType(DAG.getDataLayout(), I.getValueOperand()->getType());
4612
4613 if (I.getAlignment() < MemVT.getSizeInBits() / 8)
4614 report_fatal_error("Cannot generate unaligned atomic store");
4615
4616 auto Flags = TLI.getStoreMemOperandFlags(I, DAG.getDataLayout());
4617
4618 MachineFunction &MF = DAG.getMachineFunction();
4619 MachineMemOperand *MMO = MF.getMachineMemOperand(
4620 MachinePointerInfo(I.getPointerOperand()), Flags, MemVT.getStoreSize(),
4621 I.getAlign(), AAMDNodes(), nullptr, SSID, Ordering);
4622
4623 SDValue Val = getValue(I.getValueOperand());
4624 if (Val.getValueType() != MemVT)
4625 Val = DAG.getPtrExtOrTrunc(Val, dl, MemVT);
4626 SDValue Ptr = getValue(I.getPointerOperand());
4627
4628 if (TLI.lowerAtomicStoreAsStoreSDNode(I)) {
4629 // TODO: Once this is better exercised by tests, it should be merged with
4630 // the normal path for stores to prevent future divergence.
4631 SDValue S = DAG.getStore(InChain, dl, Val, Ptr, MMO);
4632 DAG.setRoot(S);
4633 return;
4634 }
4635 SDValue OutChain = DAG.getAtomic(ISD::ATOMIC_STORE, dl, MemVT, InChain,
4636 Ptr, Val, MMO);
4637
4638
4639 DAG.setRoot(OutChain);
4640}
4641
4642/// visitTargetIntrinsic - Lower a call of a target intrinsic to an INTRINSIC
4643/// node.
4644void SelectionDAGBuilder::visitTargetIntrinsic(const CallInst &I,
4645 unsigned Intrinsic) {
4646 // Ignore the callsite's attributes. A specific call site may be marked with
4647 // readnone, but the lowering code will expect the chain based on the
4648 // definition.
4649 const Function *F = I.getCalledFunction();
4650 bool HasChain = !F->doesNotAccessMemory();
4651 bool OnlyLoad = HasChain && F->onlyReadsMemory();
4652
4653 // Build the operand list.
4654 SmallVector<SDValue, 8> Ops;
4655 if (HasChain) { // If this intrinsic has side-effects, chainify it.
4656 if (OnlyLoad) {
4657 // We don't need to serialize loads against other loads.
4658 Ops.push_back(DAG.getRoot());
4659 } else {
4660 Ops.push_back(getRoot());
4661 }
4662 }
4663
4664 // Info is set by getTgtMemInstrinsic
4665 TargetLowering::IntrinsicInfo Info;
4666 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4667 bool IsTgtIntrinsic = TLI.getTgtMemIntrinsic(Info, I,
4668 DAG.getMachineFunction(),
4669 Intrinsic);
4670
4671 // Add the intrinsic ID as an integer operand if it's not a target intrinsic.
4672 if (!IsTgtIntrinsic || Info.opc == ISD::INTRINSIC_VOID ||
4673 Info.opc == ISD::INTRINSIC_W_CHAIN)
4674 Ops.push_back(DAG.getTargetConstant(Intrinsic, getCurSDLoc(),
4675 TLI.getPointerTy(DAG.getDataLayout())));
4676
4677 // Add all operands of the call to the operand list.
4678 for (unsigned i = 0, e = I.getNumArgOperands(); i != e; ++i) {
4679 const Value *Arg = I.getArgOperand(i);
4680 if (!I.paramHasAttr(i, Attribute::ImmArg)) {
4681 Ops.push_back(getValue(Arg));
4682 continue;
4683 }
4684
4685 // Use TargetConstant instead of a regular constant for immarg.
4686 EVT VT = TLI.getValueType(*DL, Arg->getType(), true);
4687 if (const ConstantInt *CI = dyn_cast<ConstantInt>(Arg)) {
4688 assert(CI->getBitWidth() <= 64 &&
4689 "large intrinsic immediates not handled");
4690 Ops.push_back(DAG.getTargetConstant(*CI, SDLoc(), VT));
4691 } else {
4692 Ops.push_back(
4693 DAG.getTargetConstantFP(*cast<ConstantFP>(Arg), SDLoc(), VT));
4694 }
4695 }
4696
4697 SmallVector<EVT, 4> ValueVTs;
4698 ComputeValueVTs(TLI, DAG.getDataLayout(), I.getType(), ValueVTs);
4699
4700 if (HasChain)
4701 ValueVTs.push_back(MVT::Other);
4702
4703 SDVTList VTs = DAG.getVTList(ValueVTs);
4704
4705 // Create the node.
4706 SDValue Result;
4707 if (IsTgtIntrinsic) {
4708 // This is target intrinsic that touches memory
4709 AAMDNodes AAInfo;
4710 I.getAAMetadata(AAInfo);
4711 Result =
4712 DAG.getMemIntrinsicNode(Info.opc, getCurSDLoc(), VTs, Ops, Info.memVT,
4713 MachinePointerInfo(Info.ptrVal, Info.offset),
4714 Info.align, Info.flags, Info.size, AAInfo);
4715 } else if (!HasChain) {
4716 Result = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, getCurSDLoc(), VTs, Ops);
4717 } else if (!I.getType()->isVoidTy()) {
4718 Result = DAG.getNode(ISD::INTRINSIC_W_CHAIN, getCurSDLoc(), VTs, Ops);
4719 } else {
4720 Result = DAG.getNode(ISD::INTRINSIC_VOID, getCurSDLoc(), VTs, Ops);
4721 }
4722
4723 if (HasChain) {
4724 SDValue Chain = Result.getValue(Result.getNode()->getNumValues()-1);
4725 if (OnlyLoad)
4726 PendingLoads.push_back(Chain);
4727 else
4728 DAG.setRoot(Chain);
4729 }
4730
4731 if (!I.getType()->isVoidTy()) {
4732 if (VectorType *PTy = dyn_cast<VectorType>(I.getType())) {
4733 EVT VT = TLI.getValueType(DAG.getDataLayout(), PTy);
4734 Result = DAG.getNode(ISD::BITCAST, getCurSDLoc(), VT, Result);
4735 } else
4736 Result = lowerRangeToAssertZExt(DAG, I, Result);
4737
4738 MaybeAlign Alignment = I.getRetAlign();
4739 if (!Alignment)
4740 Alignment = F->getAttributes().getRetAlignment();
4741 // Insert `assertalign` node if there's an alignment.
4742 if (InsertAssertAlign && Alignment) {
4743 Result =
4744 DAG.getAssertAlign(getCurSDLoc(), Result, Alignment.valueOrOne());
4745 }
4746
4747 setValue(&I, Result);
4748 }
4749}
4750
4751/// GetSignificand - Get the significand and build it into a floating-point
4752/// number with exponent of 1:
4753///
4754/// Op = (Op & 0x007fffff) | 0x3f800000;
4755///
4756/// where Op is the hexadecimal representation of floating point value.
4757static SDValue GetSignificand(SelectionDAG &DAG, SDValue Op, const SDLoc &dl) {
4758 SDValue t1 = DAG.getNode(ISD::AND, dl, MVT::i32, Op,
4759 DAG.getConstant(0x007fffff, dl, MVT::i32));
4760 SDValue t2 = DAG.getNode(ISD::OR, dl, MVT::i32, t1,
4761 DAG.getConstant(0x3f800000, dl, MVT::i32));
4762 return DAG.getNode(ISD::BITCAST, dl, MVT::f32, t2);
4763}
4764
4765/// GetExponent - Get the exponent:
4766///
4767/// (float)(int)(((Op & 0x7f800000) >> 23) - 127);
4768///
4769/// where Op is the hexadecimal representation of floating point value.
4770static SDValue GetExponent(SelectionDAG &DAG, SDValue Op,
4771 const TargetLowering &TLI, const SDLoc &dl) {
4772 SDValue t0 = DAG.getNode(ISD::AND, dl, MVT::i32, Op,
4773 DAG.getConstant(0x7f800000, dl, MVT::i32));
4774 SDValue t1 = DAG.getNode(
4775 ISD::SRL, dl, MVT::i32, t0,
4776 DAG.getConstant(23, dl, TLI.getPointerTy(DAG.getDataLayout())));
4777 SDValue t2 = DAG.getNode(ISD::SUB, dl, MVT::i32, t1,
4778 DAG.getConstant(127, dl, MVT::i32));
4779 return DAG.getNode(ISD::SINT_TO_FP, dl, MVT::f32, t2);
4780}
4781
4782/// getF32Constant - Get 32-bit floating point constant.
4783static SDValue getF32Constant(SelectionDAG &DAG, unsigned Flt,
4784 const SDLoc &dl) {
4785 return DAG.getConstantFP(APFloat(APFloat::IEEEsingle(), APInt(32, Flt)), dl,
4786 MVT::f32);
4787}
4788
4789static SDValue getLimitedPrecisionExp2(SDValue t0, const SDLoc &dl,
4790 SelectionDAG &DAG) {
4791 // TODO: What fast-math-flags should be set on the floating-point nodes?
4792
4793 // IntegerPartOfX = ((int32_t)(t0);
4794 SDValue IntegerPartOfX = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::i32, t0);
4795
4796 // FractionalPartOfX = t0 - (float)IntegerPartOfX;
4797 SDValue t1 = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::f32, IntegerPartOfX);
4798 SDValue X = DAG.getNode(ISD::FSUB, dl, MVT::f32, t0, t1);
4799
4800 // IntegerPartOfX <<= 23;
4801 IntegerPartOfX = DAG.getNode(
4802 ISD::SHL, dl, MVT::i32, IntegerPartOfX,
4803 DAG.getConstant(23, dl, DAG.getTargetLoweringInfo().getPointerTy(
4804 DAG.getDataLayout())));
4805
4806 SDValue TwoToFractionalPartOfX;
4807 if (LimitFloatPrecision <= 6) {
4808 // For floating-point precision of 6:
4809 //
4810 // TwoToFractionalPartOfX =
4811 // 0.997535578f +
4812 // (0.735607626f + 0.252464424f * x) * x;
4813 //
4814 // error 0.0144103317, which is 6 bits
4815 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
4816 getF32Constant(DAG, 0x3e814304, dl));
4817 SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
4818 getF32Constant(DAG, 0x3f3c50c8, dl));
4819 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
4820 TwoToFractionalPartOfX = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
4821 getF32Constant(DAG, 0x3f7f5e7e, dl));
4822 } else if (LimitFloatPrecision <= 12) {
4823 // For floating-point precision of 12:
4824 //
4825 // TwoToFractionalPartOfX =
4826 // 0.999892986f +
4827 // (0.696457318f +
4828 // (0.224338339f + 0.792043434e-1f * x) * x) * x;
4829 //
4830 // error 0.000107046256, which is 13 to 14 bits
4831 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
4832 getF32Constant(DAG, 0x3da235e3, dl));
4833 SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
4834 getF32Constant(DAG, 0x3e65b8f3, dl));
4835 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
4836 SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
4837 getF32Constant(DAG, 0x3f324b07, dl));
4838 SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
4839 TwoToFractionalPartOfX = DAG.getNode(ISD::FADD, dl, MVT::f32, t6,
4840 getF32Constant(DAG, 0x3f7ff8fd, dl));
4841 } else { // LimitFloatPrecision <= 18
4842 // For floating-point precision of 18:
4843 //
4844 // TwoToFractionalPartOfX =
4845 // 0.999999982f +
4846 // (0.693148872f +
4847 // (0.240227044f +
4848 // (0.554906021e-1f +
4849 // (0.961591928e-2f +
4850 // (0.136028312e-2f + 0.157059148e-3f *x)*x)*x)*x)*x)*x;
4851 // error 2.47208000*10^(-7), which is better than 18 bits
4852 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
4853 getF32Constant(DAG, 0x3924b03e, dl));
4854 SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
4855 getF32Constant(DAG, 0x3ab24b87, dl));
4856 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
4857 SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
4858 getF32Constant(DAG, 0x3c1d8c17, dl));
4859 SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
4860 SDValue t7 = DAG.getNode(ISD::FADD, dl, MVT::f32, t6,
4861 getF32Constant(DAG, 0x3d634a1d, dl));
4862 SDValue t8 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t7, X);
4863 SDValue t9 = DAG.getNode(ISD::FADD, dl, MVT::f32, t8,
4864 getF32Constant(DAG, 0x3e75fe14, dl));
4865 SDValue t10 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t9, X);
4866 SDValue t11 = DAG.getNode(ISD::FADD, dl, MVT::f32, t10,
4867 getF32Constant(DAG, 0x3f317234, dl));
4868 SDValue t12 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t11, X);
4869 TwoToFractionalPartOfX = DAG.getNode(ISD::FADD, dl, MVT::f32, t12,
4870 getF32Constant(DAG, 0x3f800000, dl));
4871 }
4872
4873 // Add the exponent into the result in integer domain.
4874 SDValue t13 = DAG.getNode(ISD::BITCAST, dl, MVT::i32, TwoToFractionalPartOfX);
4875 return DAG.getNode(ISD::BITCAST, dl, MVT::f32,
4876 DAG.getNode(ISD::ADD, dl, MVT::i32, t13, IntegerPartOfX));
4877}
4878
4879/// expandExp - Lower an exp intrinsic. Handles the special sequences for
4880/// limited-precision mode.
4881static SDValue expandExp(const SDLoc &dl, SDValue Op, SelectionDAG &DAG,
4882 const TargetLowering &TLI, SDNodeFlags Flags) {
4883 if (Op.getValueType() == MVT::f32 &&
4884 LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
4885
4886 // Put the exponent in the right bit position for later addition to the
4887 // final result:
4888 //
4889 // t0 = Op * log2(e)
4890
4891 // TODO: What fast-math-flags should be set here?
4892 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, Op,
4893 DAG.getConstantFP(numbers::log2ef, dl, MVT::f32));
4894 return getLimitedPrecisionExp2(t0, dl, DAG);
4895 }
4896
4897 // No special expansion.
4898 return DAG.getNode(ISD::FEXP, dl, Op.getValueType(), Op, Flags);
4899}
4900
4901/// expandLog - Lower a log intrinsic. Handles the special sequences for
4902/// limited-precision mode.
4903static SDValue expandLog(const SDLoc &dl, SDValue Op, SelectionDAG &DAG,
4904 const TargetLowering &TLI, SDNodeFlags Flags) {
4905 // TODO: What fast-math-flags should be set on the floating-point nodes?
4906
4907 if (Op.getValueType() == MVT::f32 &&
4908 LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
4909 SDValue Op1 = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Op);
4910
4911 // Scale the exponent by log(2).
4912 SDValue Exp = GetExponent(DAG, Op1, TLI, dl);
4913 SDValue LogOfExponent =
4914 DAG.getNode(ISD::FMUL, dl, MVT::f32, Exp,
4915 DAG.getConstantFP(numbers::ln2f, dl, MVT::f32));
4916
4917 // Get the significand and build it into a floating-point number with
4918 // exponent of 1.
4919 SDValue X = GetSignificand(DAG, Op1, dl);
4920
4921 SDValue LogOfMantissa;
4922 if (LimitFloatPrecision <= 6) {
4923 // For floating-point precision of 6:
4924 //
4925 // LogofMantissa =
4926 // -1.1609546f +
4927 // (1.4034025f - 0.23903021f * x) * x;
4928 //
4929 // error 0.0034276066, which is better than 8 bits
4930 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
4931 getF32Constant(DAG, 0xbe74c456, dl));
4932 SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
4933 getF32Constant(DAG, 0x3fb3a2b1, dl));
4934 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
4935 LogOfMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
4936 getF32Constant(DAG, 0x3f949a29, dl));
4937 } else if (LimitFloatPrecision <= 12) {
4938 // For floating-point precision of 12:
4939 //
4940 // LogOfMantissa =
4941 // -1.7417939f +
4942 // (2.8212026f +
4943 // (-1.4699568f +
4944 // (0.44717955f - 0.56570851e-1f * x) * x) * x) * x;
4945 //
4946 // error 0.000061011436, which is 14 bits
4947 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
4948 getF32Constant(DAG, 0xbd67b6d6, dl));
4949 SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
4950 getF32Constant(DAG, 0x3ee4f4b8, dl));
4951 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
4952 SDValue t3 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
4953 getF32Constant(DAG, 0x3fbc278b, dl));
4954 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
4955 SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
4956 getF32Constant(DAG, 0x40348e95, dl));
4957 SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
4958 LogOfMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t6,
4959 getF32Constant(DAG, 0x3fdef31a, dl));
4960 } else { // LimitFloatPrecision <= 18
4961 // For floating-point precision of 18:
4962 //
4963 // LogOfMantissa =
4964 // -2.1072184f +
4965 // (4.2372794f +
4966 // (-3.7029485f +
4967 // (2.2781945f +
4968 // (-0.87823314f +
4969 // (0.19073739f - 0.17809712e-1f * x) * x) * x) * x) * x)*x;
4970 //
4971 // error 0.0000023660568, which is better than 18 bits
4972 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
4973 getF32Constant(DAG, 0xbc91e5ac, dl));
4974 SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
4975 getF32Constant(DAG, 0x3e4350aa, dl));
4976 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
4977 SDValue t3 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
4978 getF32Constant(DAG, 0x3f60d3e3, dl));
4979 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
4980 SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
4981 getF32Constant(DAG, 0x4011cdf0, dl));
4982 SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
4983 SDValue t7 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t6,
4984 getF32Constant(DAG, 0x406cfd1c, dl));
4985 SDValue t8 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t7, X);
4986 SDValue t9 = DAG.getNode(ISD::FADD, dl, MVT::f32, t8,
4987 getF32Constant(DAG, 0x408797cb, dl));
4988 SDValue t10 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t9, X);
4989 LogOfMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t10,
4990 getF32Constant(DAG, 0x4006dcab, dl));
4991 }
4992
4993 return DAG.getNode(ISD::FADD, dl, MVT::f32, LogOfExponent, LogOfMantissa);
4994 }
4995
4996 // No special expansion.
4997 return DAG.getNode(ISD::FLOG, dl, Op.getValueType(), Op, Flags);
4998}
4999
5000/// expandLog2 - Lower a log2 intrinsic. Handles the special sequences for
5001/// limited-precision mode.
5002static SDValue expandLog2(const SDLoc &dl, SDValue Op, SelectionDAG &DAG,
5003 const TargetLowering &TLI, SDNodeFlags Flags) {
5004 // TODO: What fast-math-flags should be set on the floating-point nodes?
5005
5006 if (Op.getValueType() == MVT::f32 &&
5007 LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
5008 SDValue Op1 = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Op);
5009
5010 // Get the exponent.
5011 SDValue LogOfExponent = GetExponent(DAG, Op1, TLI, dl);
5012
5013 // Get the significand and build it into a floating-point number with
5014 // exponent of 1.
5015 SDValue X = GetSignificand(DAG, Op1, dl);
5016
5017 // Different possible minimax approximations of significand in
5018 // floating-point for various degrees of accuracy over [1,2].
5019 SDValue Log2ofMantissa;
5020 if (LimitFloatPrecision <= 6) {
5021 // For floating-point precision of 6:
5022 //
5023 // Log2ofMantissa = -1.6749035f + (2.0246817f - .34484768f * x) * x;
5024 //
5025 // error 0.0049451742, which is more than 7 bits
5026 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
5027 getF32Constant(DAG, 0xbeb08fe0, dl));
5028 SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
5029 getF32Constant(DAG, 0x40019463, dl));
5030 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
5031 Log2ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
5032 getF32Constant(DAG, 0x3fd6633d, dl));
5033 } else if (LimitFloatPrecision <= 12) {
5034 // For floating-point precision of 12:
5035 //
5036 // Log2ofMantissa =
5037 // -2.51285454f +
5038 // (4.07009056f +
5039 // (-2.12067489f +
5040 // (.645142248f - 0.816157886e-1f * x) * x) * x) * x;
5041 //
5042 // error 0.0000876136000, which is better than 13 bits
5043 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
5044 getF32Constant(DAG, 0xbda7262e, dl));
5045 SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
5046 getF32Constant(DAG, 0x3f25280b, dl));
5047 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
5048 SDValue t3 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
5049 getF32Constant(DAG, 0x4007b923, dl));
5050 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
5051 SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
5052 getF32Constant(DAG, 0x40823e2f, dl));
5053 SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
5054 Log2ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t6,
5055 getF32Constant(DAG, 0x4020d29c, dl));
5056 } else { // LimitFloatPrecision <= 18
5057 // For floating-point precision of 18:
5058 //
5059 // Log2ofMantissa =
5060 // -3.0400495f +
5061 // (6.1129976f +
5062 // (-5.3420409f +
5063 // (3.2865683f +
5064 // (-1.2669343f +
5065 // (0.27515199f -
5066 // 0.25691327e-1f * x) * x) * x) * x) * x) * x;
5067 //
5068 // error 0.0000018516, which is better than 18 bits
5069 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
5070 getF32Constant(DAG, 0xbcd2769e, dl));
5071 SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
5072 getF32Constant(DAG, 0x3e8ce0b9, dl));
5073 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
5074 SDValue t3 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
5075 getF32Constant(DAG, 0x3fa22ae7, dl));
5076 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
5077 SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
5078 getF32Constant(DAG, 0x40525723, dl));
5079 SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
5080 SDValue t7 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t6,
5081 getF32Constant(DAG, 0x40aaf200, dl));
5082 SDValue t8 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t7, X);
5083 SDValue t9 = DAG.getNode(ISD::FADD, dl, MVT::f32, t8,
5084 getF32Constant(DAG, 0x40c39dad, dl));
5085 SDValue t10 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t9, X);
5086 Log2ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t10,
5087 getF32Constant(DAG, 0x4042902c, dl));
5088 }
5089
5090 return DAG.getNode(ISD::FADD, dl, MVT::f32, LogOfExponent, Log2ofMantissa);
5091 }
5092
5093 // No special expansion.
5094 return DAG.getNode(ISD::FLOG2, dl, Op.getValueType(), Op, Flags);
5095}
5096
5097/// expandLog10 - Lower a log10 intrinsic. Handles the special sequences for
5098/// limited-precision mode.
5099static SDValue expandLog10(const SDLoc &dl, SDValue Op, SelectionDAG &DAG,
5100 const TargetLowering &TLI, SDNodeFlags Flags) {
5101 // TODO: What fast-math-flags should be set on the floating-point nodes?
5102
5103 if (Op.getValueType() == MVT::f32 &&
5104 LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
5105 SDValue Op1 = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Op);
5106
5107 // Scale the exponent by log10(2) [0.30102999f].
5108 SDValue Exp = GetExponent(DAG, Op1, TLI, dl);
5109 SDValue LogOfExponent = DAG.getNode(ISD::FMUL, dl, MVT::f32, Exp,
5110 getF32Constant(DAG, 0x3e9a209a, dl));
5111
5112 // Get the significand and build it into a floating-point number with
5113 // exponent of 1.
5114 SDValue X = GetSignificand(DAG, Op1, dl);
5115
5116 SDValue Log10ofMantissa;
5117 if (LimitFloatPrecision <= 6) {
5118 // For floating-point precision of 6:
5119 //
5120 // Log10ofMantissa =
5121 // -0.50419619f +
5122 // (0.60948995f - 0.10380950f * x) * x;
5123 //
5124 // error 0.0014886165, which is 6 bits
5125 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
5126 getF32Constant(DAG, 0xbdd49a13, dl));
5127 SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
5128 getF32Constant(DAG, 0x3f1c0789, dl));
5129 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
5130 Log10ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
5131 getF32Constant(DAG, 0x3f011300, dl));
5132 } else if (LimitFloatPrecision <= 12) {
5133 // For floating-point precision of 12:
5134 //
5135 // Log10ofMantissa =
5136 // -0.64831180f +
5137 // (0.91751397f +
5138 // (-0.31664806f + 0.47637168e-1f * x) * x) * x;
5139 //
5140 // error 0.00019228036, which is better than 12 bits
5141 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
5142 getF32Constant(DAG, 0x3d431f31, dl));
5143 SDValue t1 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t0,
5144 getF32Constant(DAG, 0x3ea21fb2, dl));
5145 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
5146 SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
5147 getF32Constant(DAG, 0x3f6ae232, dl));
5148 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
5149 Log10ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t4,
5150 getF32Constant(DAG, 0x3f25f7c3, dl));
5151 } else { // LimitFloatPrecision <= 18
5152 // For floating-point precision of 18:
5153 //
5154 // Log10ofMantissa =
5155 // -0.84299375f +
5156 // (1.5327582f +
5157 // (-1.0688956f +
5158 // (0.49102474f +
5159 // (-0.12539807f + 0.13508273e-1f * x) * x) * x) * x) * x;
5160 //
5161 // error 0.0000037995730, which is better than 18 bits
5162 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
5163 getF32Constant(DAG, 0x3c5d51ce, dl));
5164 SDValue t1 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t0,
5165 getF32Constant(DAG, 0x3e00685a, dl));
5166 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
5167 SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
5168 getF32Constant(DAG, 0x3efb6798, dl));
5169 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
5170 SDValue t5 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t4,
5171 getF32Constant(DAG, 0x3f88d192, dl));
5172 SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
5173 SDValue t7 = DAG.getNode(ISD::FADD, dl, MVT::f32, t6,
5174 getF32Constant(DAG, 0x3fc4316c, dl));
5175 SDValue t8 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t7, X);
5176 Log10ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t8,
5177 getF32Constant(DAG, 0x3f57ce70, dl));
5178 }
5179
5180 return DAG.getNode(ISD::FADD, dl, MVT::f32, LogOfExponent, Log10ofMantissa);
5181 }
5182
5183 // No special expansion.
5184 return DAG.getNode(ISD::FLOG10, dl, Op.getValueType(), Op, Flags);
5185}
5186
5187/// expandExp2 - Lower an exp2 intrinsic. Handles the special sequences for
5188/// limited-precision mode.
5189static SDValue expandExp2(const SDLoc &dl, SDValue Op, SelectionDAG &DAG,
5190 const TargetLowering &TLI, SDNodeFlags Flags) {
5191 if (Op.getValueType() == MVT::f32 &&
5192 LimitFloatPrecision > 0 && LimitFloatPrecision <= 18)
5193 return getLimitedPrecisionExp2(Op, dl, DAG);
5194
5195 // No special expansion.
5196 return DAG.getNode(ISD::FEXP2, dl, Op.getValueType(), Op, Flags);
5197}
5198
5199/// visitPow - Lower a pow intrinsic. Handles the special sequences for
5200/// limited-precision mode with x == 10.0f.
5201static SDValue expandPow(const SDLoc &dl, SDValue LHS, SDValue RHS,
5202 SelectionDAG &DAG, const TargetLowering &TLI,
5203 SDNodeFlags Flags) {
5204 bool IsExp10 = false;
5205 if (LHS.getValueType() == MVT::f32 && RHS.getValueType() == MVT::f32 &&
5206 LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
5207 if (ConstantFPSDNode *LHSC = dyn_cast<ConstantFPSDNode>(LHS)) {
5208 APFloat Ten(10.0f);
5209 IsExp10 = LHSC->isExactlyValue(Ten);
5210 }
5211 }
5212
5213 // TODO: What fast-math-flags should be set on the FMUL node?
5214 if (IsExp10) {
5215 // Put the exponent in the right bit position for later addition to the
5216 // final result:
5217 //
5218 // #define LOG2OF10 3.3219281f
5219 // t0 = Op * LOG2OF10;
5220 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, RHS,
5221 getF32Constant(DAG, 0x40549a78, dl));
5222 return getLimitedPrecisionExp2(t0, dl, DAG);
5223 }
5224
5225 // No special expansion.
5226 return DAG.getNode(ISD::FPOW, dl, LHS.getValueType(), LHS, RHS, Flags);
5227}
5228
5229/// ExpandPowI - Expand a llvm.powi intrinsic.
5230static SDValue ExpandPowI(const SDLoc &DL, SDValue LHS, SDValue RHS,
5231 SelectionDAG &DAG) {
5232 // If RHS is a constant, we can expand this out to a multiplication tree,
5233 // otherwise we end up lowering to a call to __powidf2 (for example). When
5234 // optimizing for size, we only want to do this if the expansion would produce
5235 // a small number of multiplies, otherwise we do the full expansion.
5236 if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS)) {
5237 // Get the exponent as a positive value.
5238 unsigned Val = RHSC->getSExtValue();
5239 if ((int)Val < 0) Val = -Val;
5240
5241 // powi(x, 0) -> 1.0
5242 if (Val == 0)
5243 return DAG.getConstantFP(1.0, DL, LHS.getValueType());
5244
5245 bool OptForSize = DAG.shouldOptForSize();
5246 if (!OptForSize ||
5247 // If optimizing for size, don't insert too many multiplies.
5248 // This inserts up to 5 multiplies.
5249 countPopulation(Val) + Log2_32(Val) < 7) {
5250 // We use the simple binary decomposition method to generate the multiply
5251 // sequence. There are more optimal ways to do this (for example,
5252 // powi(x,15) generates one more multiply than it should), but this has
5253 // the benefit of being both really simple and much better than a libcall.
5254 SDValue Res; // Logically starts equal to 1.0
5255 SDValue CurSquare = LHS;
5256 // TODO: Intrinsics should have fast-math-flags that propagate to these
5257 // nodes.
5258 while (Val) {
5259 if (Val & 1) {
5260 if (Res.getNode())
5261 Res = DAG.getNode(ISD::FMUL, DL,Res.getValueType(), Res, CurSquare);
5262 else
5263 Res = CurSquare; // 1.0*CurSquare.
5264 }
5265
5266 CurSquare = DAG.getNode(ISD::FMUL, DL, CurSquare.getValueType(),
5267 CurSquare, CurSquare);
5268 Val >>= 1;
5269 }
5270
5271 // If the original was negative, invert the result, producing 1/(x*x*x).
5272 if (RHSC->getSExtValue() < 0)
5273 Res = DAG.getNode(ISD::FDIV, DL, LHS.getValueType(),
5274 DAG.getConstantFP(1.0, DL, LHS.getValueType()), Res);
5275 return Res;
5276 }
5277 }
5278
5279 // Otherwise, expand to a libcall.
5280 return DAG.getNode(ISD::FPOWI, DL, LHS.getValueType(), LHS, RHS);
5281}
5282
5283static SDValue expandDivFix(unsigned Opcode, const SDLoc &DL,
5284 SDValue LHS, SDValue RHS, SDValue Scale,
5285 SelectionDAG &DAG, const TargetLowering &TLI) {
5286 EVT VT = LHS.getValueType();
5287 bool Signed = Opcode == ISD::SDIVFIX || Opcode == ISD::SDIVFIXSAT;
5288 bool Saturating = Opcode == ISD::SDIVFIXSAT || Opcode == ISD::UDIVFIXSAT;
5289 LLVMContext &Ctx = *DAG.getContext();
5290
5291 // If the type is legal but the operation isn't, this node might survive all
5292 // the way to operation legalization. If we end up there and we do not have
5293 // the ability to widen the type (if VT*2 is not legal), we cannot expand the
5294 // node.
5295
5296 // Coax the legalizer into expanding the node during type legalization instead
5297 // by bumping the size by one bit. This will force it to Promote, enabling the
5298 // early expansion and avoiding the need to expand later.
5299
5300 // We don't have to do this if Scale is 0; that can always be expanded, unless
5301 // it's a saturating signed operation. Those can experience true integer
5302 // division overflow, a case which we must avoid.
5303
5304 // FIXME: We wouldn't have to do this (or any of the early
5305 // expansion/promotion) if it was possible to expand a libcall of an
5306 // illegal type during operation legalization. But it's not, so things
5307 // get a bit hacky.
5308 unsigned ScaleInt = cast<ConstantSDNode>(Scale)->getZExtValue();
5309 if ((ScaleInt > 0 || (Saturating && Signed)) &&
5310 (TLI.isTypeLegal(VT) ||
5311 (VT.isVector() && TLI.isTypeLegal(VT.getVectorElementType())))) {
5312 TargetLowering::LegalizeAction Action = TLI.getFixedPointOperationAction(
5313 Opcode, VT, ScaleInt);
5314 if (Action != TargetLowering::Legal && Action != TargetLowering::Custom) {
5315 EVT PromVT;
5316 if (VT.isScalarInteger())
5317 PromVT = EVT::getIntegerVT(Ctx, VT.getSizeInBits() + 1);
5318 else if (VT.isVector()) {
5319 PromVT = VT.getVectorElementType();
5320 PromVT = EVT::getIntegerVT(Ctx, PromVT.getSizeInBits() + 1);
5321 PromVT = EVT::getVectorVT(Ctx, PromVT, VT.getVectorElementCount());
5322 } else
5323 llvm_unreachable("Wrong VT for DIVFIX?");
5324 if (Signed) {
5325 LHS = DAG.getSExtOrTrunc(LHS, DL, PromVT);
5326 RHS = DAG.getSExtOrTrunc(RHS, DL, PromVT);
5327 } else {
5328 LHS = DAG.getZExtOrTrunc(LHS, DL, PromVT);
5329 RHS = DAG.getZExtOrTrunc(RHS, DL, PromVT);
5330 }
5331 EVT ShiftTy = TLI.getShiftAmountTy(PromVT, DAG.getDataLayout());
5332 // For saturating operations, we need to shift up the LHS to get the
5333 // proper saturation width, and then shift down again afterwards.
5334 if (Saturating)
5335 LHS = DAG.getNode(ISD::SHL, DL, PromVT, LHS,
5336 DAG.getConstant(1, DL, ShiftTy));
5337 SDValue Res = DAG.getNode(Opcode, DL, PromVT, LHS, RHS, Scale);
5338 if (Saturating)
5339 Res = DAG.getNode(Signed ? ISD::SRA : ISD::SRL, DL, PromVT, Res,
5340 DAG.getConstant(1, DL, ShiftTy));
5341 return DAG.getZExtOrTrunc(Res, DL, VT);
5342 }
5343 }
5344
5345 return DAG.getNode(Opcode, DL, VT, LHS, RHS, Scale);
5346}
5347
5348// getUnderlyingArgRegs - Find underlying registers used for a truncated,
5349// bitcasted, or split argument. Returns a list of <Register, size in bits>
5350static void
5351getUnderlyingArgRegs(SmallVectorImpl<std::pair<unsigned, TypeSize>> &Regs,
5352 const SDValue &N) {
5353 switch (N.getOpcode()) {
5354 case ISD::CopyFromReg: {
5355 SDValue Op = N.getOperand(1);
5356 Regs.emplace_back(cast<RegisterSDNode>(Op)->getReg(),
5357 Op.getValueType().getSizeInBits());
5358 return;
5359 }
5360 case ISD::BITCAST:
5361 case ISD::AssertZext:
5362 case ISD::AssertSext:
5363 case ISD::TRUNCATE:
5364 getUnderlyingArgRegs(Regs, N.getOperand(0));
5365 return;
5366 case ISD::BUILD_PAIR:
5367 case ISD::BUILD_VECTOR:
5368 case ISD::CONCAT_VECTORS:
5369 for (SDValue Op : N->op_values())
5370 getUnderlyingArgRegs(Regs, Op);
5371 return;
5372 default:
5373 return;
5374 }
5375}
5376
5377/// If the DbgValueInst is a dbg_value of a function argument, create the
5378/// corresponding DBG_VALUE machine instruction for it now. At the end of
5379/// instruction selection, they will be inserted to the entry BB.
5380bool SelectionDAGBuilder::EmitFuncArgumentDbgValue(
5381 const Value *V, DILocalVariable *Variable, DIExpression *Expr,
5382 DILocation *DL, bool IsDbgDeclare, const SDValue &N) {
5383 const Argument *Arg = dyn_cast<Argument>(V);
5384 if (!Arg)
5385 return false;
5386
5387 if (!IsDbgDeclare) {
5388 // ArgDbgValues are hoisted to the beginning of the entry block. So we
5389 // should only emit as ArgDbgValue if the dbg.value intrinsic is found in
5390 // the entry block.
5391 bool IsInEntryBlock = FuncInfo.MBB == &FuncInfo.MF->front();
5392 if (!IsInEntryBlock)
5393 return false;
5394
5395 // ArgDbgValues are hoisted to the beginning of the entry block. So we
5396 // should only emit as ArgDbgValue if the dbg.value intrinsic describes a
5397 // variable that also is a param.
5398 //
5399 // Although, if we are at the top of the entry block already, we can still
5400 // emit using ArgDbgValue. This might catch some situations when the
5401 // dbg.value refers to an argument that isn't used in the entry block, so
5402 // any CopyToReg node would be optimized out and the only way to express
5403 // this DBG_VALUE is by using the physical reg (or FI) as done in this
5404 // method. ArgDbgValues are hoisted to the beginning of the entry block. So
5405 // we should only emit as ArgDbgValue if the Variable is an argument to the
5406 // current function, and the dbg.value intrinsic is found in the entry
5407 // block.
5408 bool VariableIsFunctionInputArg = Variable->isParameter() &&
5409 !DL->getInlinedAt();
5410 bool IsInPrologue = SDNodeOrder == LowestSDNodeOrder;
5411 if (!IsInPrologue && !VariableIsFunctionInputArg)
5412 return false;
5413
5414 // Here we assume that a function argument on IR level only can be used to
5415 // describe one input parameter on source level. If we for example have
5416 // source code like this
5417 //
5418 // struct A { long x, y; };
5419 // void foo(struct A a, long b) {
5420 // ...
5421 // b = a.x;
5422 // ...
5423 // }
5424 //
5425 // and IR like this
5426 //
5427 // define void @foo(i32 %a1, i32 %a2, i32 %b) {
5428 // entry:
5429 // call void @llvm.dbg.value(metadata i32 %a1, "a", DW_OP_LLVM_fragment
5430 // call void @llvm.dbg.value(metadata i32 %a2, "a", DW_OP_LLVM_fragment
5431 // call void @llvm.dbg.value(metadata i32 %b, "b",
5432 // ...
5433 // call void @llvm.dbg.value(metadata i32 %a1, "b"
5434 // ...
5435 //
5436 // then the last dbg.value is describing a parameter "b" using a value that
5437 // is an argument. But since we already has used %a1 to describe a parameter
5438 // we should not handle that last dbg.value here (that would result in an
5439 // incorrect hoisting of the DBG_VALUE to the function entry).
5440 // Notice that we allow one dbg.value per IR level argument, to accommodate
5441 // for the situation with fragments above.
5442 if (VariableIsFunctionInputArg) {
5443 unsigned ArgNo = Arg->getArgNo();
5444 if (ArgNo >= FuncInfo.DescribedArgs.size())
5445 FuncInfo.DescribedArgs.resize(ArgNo + 1, false);
5446 else if (!IsInPrologue && FuncInfo.DescribedArgs.test(ArgNo))
5447 return false;
5448 FuncInfo.DescribedArgs.set(ArgNo);
5449 }
5450 }
5451
5452 MachineFunction &MF = DAG.getMachineFunction();
5453 const TargetInstrInfo *TII = DAG.getSubtarget().getInstrInfo();
5454
5455 bool IsIndirect = false;
5456 Optional<MachineOperand> Op;
5457 // Some arguments' frame index is recorded during argument lowering.
5458 int FI = FuncInfo.getArgumentFrameIndex(Arg);
5459 if (FI != std::numeric_limits<int>::max())
5460 Op = MachineOperand::CreateFI(FI);
5461
5462 SmallVector<std::pair<unsigned, TypeSize>, 8> ArgRegsAndSizes;
5463 if (!Op && N.getNode()) {
5464 getUnderlyingArgRegs(ArgRegsAndSizes, N);
5465 Register Reg;
5466 if (ArgRegsAndSizes.size() == 1)
5467 Reg = ArgRegsAndSizes.front().first;
5468
5469 if (Reg && Reg.isVirtual()) {
5470 MachineRegisterInfo &RegInfo = MF.getRegInfo();
5471 Register PR = RegInfo.getLiveInPhysReg(Reg);
5472 if (PR)
5473 Reg = PR;
5474 }
5475 if (Reg) {
5476 Op = MachineOperand::CreateReg(Reg, false);
5477 IsIndirect = IsDbgDeclare;
5478 }
5479 }
5480
5481 if (!Op && N.getNode()) {
5482 // Check if frame index is available.
5483 SDValue LCandidate = peekThroughBitcasts(N);
5484 if (LoadSDNode *LNode = dyn_cast<LoadSDNode>(LCandidate.getNode()))
5485 if (FrameIndexSDNode *FINode =
5486 dyn_cast<FrameIndexSDNode>(LNode->getBasePtr().getNode()))
5487 Op = MachineOperand::CreateFI(FINode->getIndex());
5488 }
5489
5490 if (!Op) {
5491 // Create a DBG_VALUE for each decomposed value in ArgRegs to cover Reg
5492 auto splitMultiRegDbgValue = [&](ArrayRef<std::pair<unsigned, TypeSize>>
5493 SplitRegs) {
5494 unsigned Offset = 0;
5495 for (auto RegAndSize : SplitRegs) {
5496 // If the expression is already a fragment, the current register
5497 // offset+size might extend beyond the fragment. In this case, only
5498 // the register bits that are inside the fragment are relevant.
5499 int RegFragmentSizeInBits = RegAndSize.second;
5500 if (auto ExprFragmentInfo = Expr->getFragmentInfo()) {
5501 uint64_t ExprFragmentSizeInBits = ExprFragmentInfo->SizeInBits;
5502 // The register is entirely outside the expression fragment,
5503 // so is irrelevant for debug info.
5504 if (Offset >= ExprFragmentSizeInBits)
5505 break;
5506 // The register is partially outside the expression fragment, only
5507 // the low bits within the fragment are relevant for debug info.
5508 if (Offset + RegFragmentSizeInBits > ExprFragmentSizeInBits) {
5509 RegFragmentSizeInBits = ExprFragmentSizeInBits - Offset;
5510 }
5511 }
5512
5513 auto FragmentExpr = DIExpression::createFragmentExpression(
5514 Expr, Offset, RegFragmentSizeInBits);
5515 Offset += RegAndSize.second;
5516 // If a valid fragment expression cannot be created, the variable's
5517 // correct value cannot be determined and so it is set as Undef.
5518 if (!FragmentExpr) {
5519 SDDbgValue *SDV = DAG.getConstantDbgValue(
5520 Variable, Expr, UndefValue::get(V->getType()), DL, SDNodeOrder);
5521 DAG.AddDbgValue(SDV, nullptr, false);
5522 continue;
5523 }
5524 assert(!IsDbgDeclare && "DbgDeclare operand is not in memory?");
5525 FuncInfo.ArgDbgValues.push_back(
5526 BuildMI(MF, DL, TII->get(TargetOpcode::DBG_VALUE), IsDbgDeclare,
5527 RegAndSize.first, Variable, *FragmentExpr));
5528 }
5529 };
5530
5531 // Check if ValueMap has reg number.
5532 DenseMap<const Value *, Register>::const_iterator
5533 VMI = FuncInfo.ValueMap.find(V);
5534 if (VMI != FuncInfo.ValueMap.end()) {
5535 const auto &TLI = DAG.getTargetLoweringInfo();
5536 RegsForValue RFV(V->getContext(), TLI, DAG.getDataLayout(), VMI->second,
5537 V->getType(), None);
5538 if (RFV.occupiesMultipleRegs()) {
5539 splitMultiRegDbgValue(RFV.getRegsAndSizes());
5540 return true;
5541 }
5542
5543 Op = MachineOperand::CreateReg(VMI->second, false);
5544 IsIndirect = IsDbgDeclare;
5545 } else if (ArgRegsAndSizes.size() > 1) {
5546 // This was split due to the calling convention, and no virtual register
5547 // mapping exists for the value.
5548 splitMultiRegDbgValue(ArgRegsAndSizes);
5549 return true;
5550 }
5551 }
5552
5553 if (!Op)
5554 return false;
5555
5556 assert(Variable->isValidLocationForIntrinsic(DL) &&
5557 "Expected inlined-at fields to agree");
5558 IsIndirect = (Op->isReg()) ? IsIndirect : true;
5559 FuncInfo.ArgDbgValues.push_back(
5560 BuildMI(MF, DL, TII->get(TargetOpcode::DBG_VALUE), IsIndirect,
5561 *Op, Variable, Expr));
5562
5563 return true;
5564}
5565
5566/// Return the appropriate SDDbgValue based on N.
5567SDDbgValue *SelectionDAGBuilder::getDbgValue(SDValue N,
5568 DILocalVariable *Variable,
5569 DIExpression *Expr,
5570 const DebugLoc &dl,
5571 unsigned DbgSDNodeOrder) {
5572 if (auto *FISDN = dyn_cast<FrameIndexSDNode>(N.getNode())) {
5573 // Construct a FrameIndexDbgValue for FrameIndexSDNodes so we can describe
5574 // stack slot locations.
5575 //
5576 // Consider "int x = 0; int *px = &x;". There are two kinds of interesting
5577 // debug values here after optimization:
5578 //
5579 // dbg.value(i32* %px, !"int *px", !DIExpression()), and
5580 // dbg.value(i32* %px, !"int x", !DIExpression(DW_OP_deref))
5581 //
5582 // Both describe the direct values of their associated variables.
5583 return DAG.getFrameIndexDbgValue(Variable, Expr, FISDN->getIndex(),
5584 /*IsIndirect*/ false, dl, DbgSDNodeOrder);
5585 }
5586 return DAG.getDbgValue(Variable, Expr, N.getNode(), N.getResNo(),
5587 /*IsIndirect*/ false, dl, DbgSDNodeOrder);
5588}
5589
5590static unsigned FixedPointIntrinsicToOpcode(unsigned Intrinsic) {
5591 switch (Intrinsic) {
5592 case Intrinsic::smul_fix:
5593 return ISD::SMULFIX;
5594 case Intrinsic::umul_fix:
5595 return ISD::UMULFIX;
5596 case Intrinsic::smul_fix_sat:
5597 return ISD::SMULFIXSAT;
5598 case Intrinsic::umul_fix_sat:
5599 return ISD::UMULFIXSAT;
5600 case Intrinsic::sdiv_fix:
5601 return ISD::SDIVFIX;
5602 case Intrinsic::udiv_fix:
5603 return ISD::UDIVFIX;
5604 case Intrinsic::sdiv_fix_sat:
5605 return ISD::SDIVFIXSAT;
5606 case Intrinsic::udiv_fix_sat:
5607 return ISD::UDIVFIXSAT;
5608 default:
5609 llvm_unreachable("Unhandled fixed point intrinsic");
5610 }
5611}
5612
5613void SelectionDAGBuilder::lowerCallToExternalSymbol(const CallInst &I,
5614 const char *FunctionName) {
5615 assert(FunctionName && "FunctionName must not be nullptr");
5616 SDValue Callee = DAG.getExternalSymbol(
5617 FunctionName,
5618 DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout()));
5619 LowerCallTo(I, Callee, I.isTailCall());
5620}
5621
5622/// Given a @llvm.call.preallocated.setup, return the corresponding
5623/// preallocated call.
5624static const CallBase *FindPreallocatedCall(const Value *PreallocatedSetup) {
5625 assert(cast<CallBase>(PreallocatedSetup)
5626 ->getCalledFunction()
5627 ->getIntrinsicID() == Intrinsic::call_preallocated_setup &&
5628 "expected call_preallocated_setup Value");
5629 for (auto *U : PreallocatedSetup->users()) {
5630 auto *UseCall = cast<CallBase>(U);
5631 const Function *Fn = UseCall->getCalledFunction();
5632 if (!Fn || Fn->getIntrinsicID() != Intrinsic::call_preallocated_arg) {
5633 return UseCall;
5634 }
5635 }
5636 llvm_unreachable("expected corresponding call to preallocated setup/arg");
5637}
5638
5639/// Lower the call to the specified intrinsic function.
5640void SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I,
5641 unsigned Intrinsic) {
5642 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
5643 SDLoc sdl = getCurSDLoc();
5644 DebugLoc dl = getCurDebugLoc();
5645 SDValue Res;
5646
5647 SDNodeFlags Flags;
5648 if (auto *FPOp = dyn_cast<FPMathOperator>(&I))
5649 Flags.copyFMF(*FPOp);
5650
5651 switch (Intrinsic) {
5652 default:
5653 // By default, turn this into a target intrinsic node.
5654 visitTargetIntrinsic(I, Intrinsic);
5655 return;
5656 case Intrinsic::vscale: {
5657 match(&I, m_VScale(DAG.getDataLayout()));
5658 EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
5659 setValue(&I,
5660 DAG.getVScale(getCurSDLoc(), VT, APInt(VT.getSizeInBits(), 1)));
5661 return;
5662 }
5663 case Intrinsic::vastart: visitVAStart(I); return;
5664 case Intrinsic::vaend: visitVAEnd(I); return;
5665 case Intrinsic::vacopy: visitVACopy(I); return;
5666 case Intrinsic::returnaddress:
5667 setValue(&I, DAG.getNode(ISD::RETURNADDR, sdl,
5668 TLI.getPointerTy(DAG.getDataLayout()),
5669 getValue(I.getArgOperand(0))));
5670 return;
5671 case Intrinsic::addressofreturnaddress:
5672 setValue(&I, DAG.getNode(ISD::ADDROFRETURNADDR, sdl,
5673 TLI.getPointerTy(DAG.getDataLayout())));
5674 return;
5675 case Intrinsic::sponentry:
5676 setValue(&I, DAG.getNode(ISD::SPONENTRY, sdl,
5677 TLI.getFrameIndexTy(DAG.getDataLayout())));
5678 return;
5679 case Intrinsic::frameaddress:
5680 setValue(&I, DAG.getNode(ISD::FRAMEADDR, sdl,
5681 TLI.getFrameIndexTy(DAG.getDataLayout()),
5682 getValue(I.getArgOperand(0))));
5683 return;
5684 case Intrinsic::read_volatile_register:
5685 case Intrinsic::read_register: {
5686 Value *Reg = I.getArgOperand(0);
5687 SDValue Chain = getRoot();
5688 SDValue RegName =
5689 DAG.getMDNode(cast<MDNode>(cast<MetadataAsValue>(Reg)->getMetadata()));
5690 EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
5691 Res = DAG.getNode(ISD::READ_REGISTER, sdl,
5692 DAG.getVTList(VT, MVT::Other), Chain, RegName);
5693 setValue(&I, Res);
5694 DAG.setRoot(Res.getValue(1));
5695 return;
5696 }
5697 case Intrinsic::write_register: {
5698 Value *Reg = I.getArgOperand(0);
5699 Value *RegValue = I.getArgOperand(1);
5700 SDValue Chain = getRoot();
5701 SDValue RegName =
5702 DAG.getMDNode(cast<MDNode>(cast<MetadataAsValue>(Reg)->getMetadata()));
5703 DAG.setRoot(DAG.getNode(ISD::WRITE_REGISTER, sdl, MVT::Other, Chain,
5704 RegName, getValue(RegValue)));
5705 return;
5706 }
5707 case Intrinsic::memcpy: {
5708 const auto &MCI = cast<MemCpyInst>(I);
5709 SDValue Op1 = getValue(I.getArgOperand(0));
5710 SDValue Op2 = getValue(I.getArgOperand(1));
5711 SDValue Op3 = getValue(I.getArgOperand(2));
5712 // @llvm.memcpy defines 0 and 1 to both mean no alignment.
5713 Align DstAlign = MCI.getDestAlign().valueOrOne();
5714 Align SrcAlign = MCI.getSourceAlign().valueOrOne();
5715 Align Alignment = commonAlignment(DstAlign, SrcAlign);
5716 bool isVol = MCI.isVolatile();
5717 bool isTC = I.isTailCall() && isInTailCallPosition(I, DAG.getTarget());
5718 // FIXME: Support passing different dest/src alignments to the memcpy DAG
5719 // node.
5720 SDValue Root = isVol ? getRoot() : getMemoryRoot();
5721 SDValue MC = DAG.getMemcpy(Root, sdl, Op1, Op2, Op3, Alignment, isVol,
5722 /* AlwaysInline */ false, isTC,
5723 MachinePointerInfo(I.getArgOperand(0)),
5724 MachinePointerInfo(I.getArgOperand(1)));
5725 updateDAGForMaybeTailCall(MC);
5726 return;
5727 }
5728 case Intrinsic::memcpy_inline: {
5729 const auto &MCI = cast<MemCpyInlineInst>(I);
5730 SDValue Dst = getValue(I.getArgOperand(0));
5731 SDValue Src = getValue(I.getArgOperand(1));
5732 SDValue Size = getValue(I.getArgOperand(2));
5733 assert(isa<ConstantSDNode>(Size) && "memcpy_inline needs constant size");
5734 // @llvm.memcpy.inline defines 0 and 1 to both mean no alignment.
5735 Align DstAlign = MCI.getDestAlign().valueOrOne();
5736 Align SrcAlign = MCI.getSourceAlign().valueOrOne();
5737 Align Alignment = commonAlignment(DstAlign, SrcAlign);
5738 bool isVol = MCI.isVolatile();
5739 bool isTC = I.isTailCall() && isInTailCallPosition(I, DAG.getTarget());
5740 // FIXME: Support passing different dest/src alignments to the memcpy DAG
5741 // node.
5742 SDValue MC = DAG.getMemcpy(getRoot(), sdl, Dst, Src, Size, Alignment, isVol,
5743 /* AlwaysInline */ true, isTC,
5744 MachinePointerInfo(I.getArgOperand(0)),
5745 MachinePointerInfo(I.getArgOperand(1)));
5746 updateDAGForMaybeTailCall(MC);
5747 return;
5748 }
5749 case Intrinsic::memset: {
5750 const auto &MSI = cast<MemSetInst>(I);
5751 SDValue Op1 = getValue(I.getArgOperand(0));
5752 SDValue Op2 = getValue(I.getArgOperand(1));
5753 SDValue Op3 = getValue(I.getArgOperand(2));
5754 // @llvm.memset defines 0 and 1 to both mean no alignment.
5755 Align Alignment = MSI.getDestAlign().valueOrOne();
5756 bool isVol = MSI.isVolatile();
5757 bool isTC = I.isTailCall() && isInTailCallPosition(I, DAG.getTarget());
5758 SDValue Root = isVol ? getRoot() : getMemoryRoot();
5759 SDValue MS = DAG.getMemset(Root, sdl, Op1, Op2, Op3, Alignment, isVol, isTC,
5760 MachinePointerInfo(I.getArgOperand(0)));
5761 updateDAGForMaybeTailCall(MS);
5762 return;
5763 }
5764 case Intrinsic::memmove: {
5765 const auto &MMI = cast<MemMoveInst>(I);
5766 SDValue Op1 = getValue(I.getArgOperand(0));
5767 SDValue Op2 = getValue(I.getArgOperand(1));
5768 SDValue Op3 = getValue(I.getArgOperand(2));
5769 // @llvm.memmove defines 0 and 1 to both mean no alignment.
5770 Align DstAlign = MMI.getDestAlign().valueOrOne();
5771 Align SrcAlign = MMI.getSourceAlign().valueOrOne();
5772 Align Alignment = commonAlignment(DstAlign, SrcAlign);
5773 bool isVol = MMI.isVolatile();
5774 bool isTC = I.isTailCall() && isInTailCallPosition(I, DAG.getTarget());
5775 // FIXME: Support passing different dest/src alignments to the memmove DAG
5776 // node.
5777 SDValue Root = isVol ? getRoot() : getMemoryRoot();
5778 SDValue MM = DAG.getMemmove(Root, sdl, Op1, Op2, Op3, Alignment, isVol,
5779 isTC, MachinePointerInfo(I.getArgOperand(0)),
5780 MachinePointerInfo(I.getArgOperand(1)));
5781 updateDAGForMaybeTailCall(MM);
5782 return;
5783 }
5784 case Intrinsic::memcpy_element_unordered_atomic: {
5785 const AtomicMemCpyInst &MI = cast<AtomicMemCpyInst>(I);
5786 SDValue Dst = getValue(MI.getRawDest());
5787 SDValue Src = getValue(MI.getRawSource());
5788 SDValue Length = getValue(MI.getLength());
5789
5790 unsigned DstAlign = MI.getDestAlignment();
5791 unsigned SrcAlign = MI.getSourceAlignment();
5792 Type *LengthTy = MI.getLength()->getType();
5793 unsigned ElemSz = MI.getElementSizeInBytes();
5794 bool isTC = I.isTailCall() && isInTailCallPosition(I, DAG.getTarget());
5795 SDValue MC = DAG.getAtomicMemcpy(getRoot(), sdl, Dst, DstAlign, Src,
5796 SrcAlign, Length, LengthTy, ElemSz, isTC,
5797 MachinePointerInfo(MI.getRawDest()),
5798 MachinePointerInfo(MI.getRawSource()));
5799 updateDAGForMaybeTailCall(MC);
5800 return;
5801 }
5802 case Intrinsic::memmove_element_unordered_atomic: {
5803 auto &MI = cast<AtomicMemMoveInst>(I);
5804 SDValue Dst = getValue(MI.getRawDest());
5805 SDValue Src = getValue(MI.getRawSource());
5806 SDValue Length = getValue(MI.getLength());
5807
5808 unsigned DstAlign = MI.getDestAlignment();
5809 unsigned SrcAlign = MI.getSourceAlignment();
5810 Type *LengthTy = MI.getLength()->getType();
5811 unsigned ElemSz = MI.getElementSizeInBytes();
5812 bool isTC = I.isTailCall() && isInTailCallPosition(I, DAG.getTarget());
5813 SDValue MC = DAG.getAtomicMemmove(getRoot(), sdl, Dst, DstAlign, Src,
5814 SrcAlign, Length, LengthTy, ElemSz, isTC,
5815 MachinePointerInfo(MI.getRawDest()),
5816 MachinePointerInfo(MI.getRawSource()));
5817 updateDAGForMaybeTailCall(MC);
5818 return;
5819 }
5820 case Intrinsic::memset_element_unordered_atomic: {
5821 auto &MI = cast<AtomicMemSetInst>(I);
5822 SDValue Dst = getValue(MI.getRawDest());
5823 SDValue Val = getValue(MI.getValue());
5824 SDValue Length = getValue(MI.getLength());
5825
5826 unsigned DstAlign = MI.getDestAlignment();
5827 Type *LengthTy = MI.getLength()->getType();
5828 unsigned ElemSz = MI.getElementSizeInBytes();
5829 bool isTC = I.isTailCall() && isInTailCallPosition(I, DAG.getTarget());
5830 SDValue MC = DAG.getAtomicMemset(getRoot(), sdl, Dst, DstAlign, Val, Length,
5831 LengthTy, ElemSz, isTC,
5832 MachinePointerInfo(MI.getRawDest()));
5833 updateDAGForMaybeTailCall(MC);
5834 return;
5835 }
5836 case Intrinsic::call_preallocated_setup: {
5837 const CallBase *PreallocatedCall = FindPreallocatedCall(&I);
5838 SDValue SrcValue = DAG.getSrcValue(PreallocatedCall);
5839 SDValue Res = DAG.getNode(ISD::PREALLOCATED_SETUP, sdl, MVT::Other,
5840 getRoot(), SrcValue);
5841 setValue(&I, Res);
5842 DAG.setRoot(Res);
5843 return;
5844 }
5845 case Intrinsic::call_preallocated_arg: {
5846 const CallBase *PreallocatedCall = FindPreallocatedCall(I.getOperand(0));
5847 SDValue SrcValue = DAG.getSrcValue(PreallocatedCall);
5848 SDValue Ops[3];
5849 Ops[0] = getRoot();
5850 Ops[1] = SrcValue;
5851 Ops[2] = DAG.getTargetConstant(*cast<ConstantInt>(I.getArgOperand(1)), sdl,
5852 MVT::i32); // arg index
5853 SDValue Res = DAG.getNode(
5854 ISD::PREALLOCATED_ARG, sdl,
5855 DAG.getVTList(TLI.getPointerTy(DAG.getDataLayout()), MVT::Other), Ops);
5856 setValue(&I, Res);
5857 DAG.setRoot(Res.getValue(1));
5858 return;
5859 }
5860 case Intrinsic::dbg_addr:
5861 case Intrinsic::dbg_declare: {
5862 const auto &DI = cast<DbgVariableIntrinsic>(I);
5863 DILocalVariable *Variable = DI.getVariable();
5864 DIExpression *Expression = DI.getExpression();
5865 dropDanglingDebugInfo(Variable, Expression);
5866 assert(Variable && "Missing variable");
5867 LLVM_DEBUG(dbgs() << "SelectionDAG visiting debug intrinsic: " << DI
5868 << "\n");
5869 // Check if address has undef value.
5870 const Value *Address = DI.getVariableLocation();
5871 if (!Address || isa<UndefValue>(Address) ||
5872 (Address->use_empty() && !isa<Argument>(Address))) {
5873 LLVM_DEBUG(dbgs() << "Dropping debug info for " << DI
5874 << " (bad/undef/unused-arg address)\n");
5875 return;
5876 }
5877
5878 bool isParameter = Variable->isParameter() || isa<Argument>(Address);
5879
5880 // Check if this variable can be described by a frame index, typically
5881 // either as a static alloca or a byval parameter.
5882 int FI = std::numeric_limits<int>::max();
5883 if (const auto *AI =
5884 dyn_cast<AllocaInst>(Address->stripInBoundsConstantOffsets())) {
5885 if (AI->isStaticAlloca()) {
5886 auto I = FuncInfo.StaticAllocaMap.find(AI);
5887 if (I != FuncInfo.StaticAllocaMap.end())
5888 FI = I->second;
5889 }
5890 } else if (const auto *Arg = dyn_cast<Argument>(
5891 Address->stripInBoundsConstantOffsets())) {
5892 FI = FuncInfo.getArgumentFrameIndex(Arg);
5893 }
5894
5895 // llvm.dbg.addr is control dependent and always generates indirect
5896 // DBG_VALUE instructions. llvm.dbg.declare is handled as a frame index in
5897 // the MachineFunction variable table.
5898 if (FI != std::numeric_limits<int>::max()) {
5899 if (Intrinsic == Intrinsic::dbg_addr) {
5900 SDDbgValue *SDV = DAG.getFrameIndexDbgValue(
5901 Variable, Expression, FI, /*IsIndirect*/ true, dl, SDNodeOrder);
5902 DAG.AddDbgValue(SDV, getRoot().getNode(), isParameter);
5903 } else {
5904 LLVM_DEBUG(dbgs() << "Skipping " << DI
5905 << " (variable info stashed in MF side table)\n");
5906 }
5907 return;
5908 }
5909
5910 SDValue &N = NodeMap[Address];
5911 if (!N.getNode() && isa<Argument>(Address))
5912 // Check unused arguments map.
5913 N = UnusedArgNodeMap[Address];
5914 SDDbgValue *SDV;
5915 if (N.getNode()) {
5916 if (const BitCastInst *BCI = dyn_cast<BitCastInst>(Address))
5917 Address = BCI->getOperand(0);
5918 // Parameters are handled specially.
5919 auto FINode = dyn_cast<FrameIndexSDNode>(N.getNode());
5920 if (isParameter && FINode) {
5921 // Byval parameter. We have a frame index at this point.
5922 SDV =
5923 DAG.getFrameIndexDbgValue(Variable, Expression, FINode->getIndex(),
5924 /*IsIndirect*/ true, dl, SDNodeOrder);
5925 } else if (isa<Argument>(Address)) {
5926 // Address is an argument, so try to emit its dbg value using
5927 // virtual register info from the FuncInfo.ValueMap.
5928 EmitFuncArgumentDbgValue(Address, Variable, Expression, dl, true, N);
5929 return;
5930 } else {
5931 SDV = DAG.getDbgValue(Variable, Expression, N.getNode(), N.getResNo(),
5932 true, dl, SDNodeOrder);
5933 }
5934 DAG.AddDbgValue(SDV, N.getNode(), isParameter);
5935 } else {
5936 // If Address is an argument then try to emit its dbg value using
5937 // virtual register info from the FuncInfo.ValueMap.
5938 if (!EmitFuncArgumentDbgValue(Address, Variable, Expression, dl, true,
5939 N)) {
5940 LLVM_DEBUG(dbgs() << "Dropping debug info for " << DI
5941 << " (could not emit func-arg dbg_value)\n");
5942 }
5943 }
5944 return;
5945 }
5946 case Intrinsic::dbg_label: {
5947 const DbgLabelInst &DI = cast<DbgLabelInst>(I);
5948 DILabel *Label = DI.getLabel();
5949 assert(Label && "Missing label");
5950
5951 SDDbgLabel *SDV;
5952 SDV = DAG.getDbgLabel(Label, dl, SDNodeOrder);
5953 DAG.AddDbgLabel(SDV);
5954 return;
5955 }
5956 case Intrinsic::dbg_value: {
5957 const DbgValueInst &DI = cast<DbgValueInst>(I);
5958 assert(DI.getVariable() && "Missing variable");
5959
5960 DILocalVariable *Variable = DI.getVariable();
5961 DIExpression *Expression = DI.getExpression();
5962 dropDanglingDebugInfo(Variable, Expression);
5963 const Value *V = DI.getValue();
5964 if (!V)
5965 return;
5966
5967 if (handleDebugValue(V, Variable, Expression, dl, DI.getDebugLoc(),
5968 SDNodeOrder))
5969 return;
5970
5971 // TODO: Dangling debug info will eventually either be resolved or produce
5972 // an Undef DBG_VALUE. However in the resolution case, a gap may appear
5973 // between the original dbg.value location and its resolved DBG_VALUE, which
5974 // we should ideally fill with an extra Undef DBG_VALUE.
5975
5976 DanglingDebugInfoMap[V].emplace_back(&DI, dl, SDNodeOrder);
5977 return;
5978 }
5979
5980 case Intrinsic::eh_typeid_for: {
5981 // Find the type id for the given typeinfo.
5982 GlobalValue *GV = ExtractTypeInfo(I.getArgOperand(0));
5983 unsigned TypeID = DAG.getMachineFunction().getTypeIDFor(GV);
5984 Res = DAG.getConstant(TypeID, sdl, MVT::i32);
5985 setValue(&I, Res);
5986 return;
5987 }
5988
5989 case Intrinsic::eh_return_i32:
5990 case Intrinsic::eh_return_i64:
5991 DAG.getMachineFunction().setCallsEHReturn(true);
5992 DAG.setRoot(DAG.getNode(ISD::EH_RETURN, sdl,
5993 MVT::Other,
5994 getControlRoot(),
5995 getValue(I.getArgOperand(0)),
5996 getValue(I.getArgOperand(1))));
5997 return;
5998 case Intrinsic::eh_unwind_init:
5999 DAG.getMachineFunction().setCallsUnwindInit(true);
6000 return;
6001 case Intrinsic::eh_dwarf_cfa:
6002 setValue(&I, DAG.getNode(ISD::EH_DWARF_CFA, sdl,
6003 TLI.getPointerTy(DAG.getDataLayout()),
6004 getValue(I.getArgOperand(0))));
6005 return;
6006 case Intrinsic::eh_sjlj_callsite: {
6007 MachineModuleInfo &MMI = DAG.getMachineFunction().getMMI();
6008 ConstantInt *CI = dyn_cast<ConstantInt>(I.getArgOperand(0));
6009 assert(CI && "Non-constant call site value in eh.sjlj.callsite!");
6010 assert(MMI.getCurrentCallSite() == 0 && "Overlapping call sites!");
6011
6012 MMI.setCurrentCallSite(CI->getZExtValue());
6013 return;
6014 }
6015 case Intrinsic::eh_sjlj_functioncontext: {
6016 // Get and store the index of the function context.
6017 MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
6018 AllocaInst *FnCtx =
6019 cast<AllocaInst>(I.getArgOperand(0)->stripPointerCasts());
6020 int FI = FuncInfo.StaticAllocaMap[FnCtx];
6021 MFI.setFunctionContextIndex(FI);
6022 return;
6023 }
6024 case Intrinsic::eh_sjlj_setjmp: {
6025 SDValue Ops[2];
6026 Ops[0] = getRoot();
6027 Ops[1] = getValue(I.getArgOperand(0));
6028 SDValue Op = DAG.getNode(ISD::EH_SJLJ_SETJMP, sdl,
6029 DAG.getVTList(MVT::i32, MVT::Other), Ops);
6030 setValue(&I, Op.getValue(0));
6031 DAG.setRoot(Op.getValue(1));
6032 return;
6033 }
6034 case Intrinsic::eh_sjlj_longjmp:
6035 DAG.setRoot(DAG.getNode(ISD::EH_SJLJ_LONGJMP, sdl, MVT::Other,
6036 getRoot(), getValue(I.getArgOperand(0))));
6037 return;
6038 case Intrinsic::eh_sjlj_setup_dispatch:
6039 DAG.setRoot(DAG.getNode(ISD::EH_SJLJ_SETUP_DISPATCH, sdl, MVT::Other,
6040 getRoot()));
6041 return;
6042 case Intrinsic::masked_gather:
6043 visitMaskedGather(I);
6044 return;
6045 case Intrinsic::masked_load:
6046 visitMaskedLoad(I);
6047 return;
6048 case Intrinsic::masked_scatter:
6049 visitMaskedScatter(I);
6050 return;
6051 case Intrinsic::masked_store:
6052 visitMaskedStore(I);
6053 return;
6054 case Intrinsic::masked_expandload:
6055 visitMaskedLoad(I, true /* IsExpanding */);
6056 return;
6057 case Intrinsic::masked_compressstore:
6058 visitMaskedStore(I, true /* IsCompressing */);
6059 return;
6060 case Intrinsic::powi:
6061 setValue(&I, ExpandPowI(sdl, getValue(I.getArgOperand(0)),
6062 getValue(I.getArgOperand(1)), DAG));
6063 return;
6064 case Intrinsic::log:
6065 setValue(&I, expandLog(sdl, getValue(I.getArgOperand(0)), DAG, TLI, Flags));
6066 return;
6067 case Intrinsic::log2:
6068 setValue(&I,
6069 expandLog2(sdl, getValue(I.getArgOperand(0)), DAG, TLI, Flags));
6070 return;
6071 case Intrinsic::log10:
6072 setValue(&I,
6073 expandLog10(sdl, getValue(I.getArgOperand(0)), DAG, TLI, Flags));
6074 return;
6075 case Intrinsic::exp:
6076 setValue(&I, expandExp(sdl, getValue(I.getArgOperand(0)), DAG, TLI, Flags));
6077 return;
6078 case Intrinsic::exp2:
6079 setValue(&I,
6080 expandExp2(sdl, getValue(I.getArgOperand(0)), DAG, TLI, Flags));
6081 return;
6082 case Intrinsic::pow:
6083 setValue(&I, expandPow(sdl, getValue(I.getArgOperand(0)),
6084 getValue(I.getArgOperand(1)), DAG, TLI, Flags));
6085 return;
6086 case Intrinsic::sqrt:
6087 case Intrinsic::fabs:
6088 case Intrinsic::sin:
6089 case Intrinsic::cos:
6090 case Intrinsic::floor:
6091 case Intrinsic::ceil:
6092 case Intrinsic::trunc:
6093 case Intrinsic::rint:
6094 case Intrinsic::nearbyint:
6095 case Intrinsic::round:
6096 case Intrinsic::roundeven:
6097 case Intrinsic::canonicalize: {
6098 unsigned Opcode;
6099 switch (Intrinsic) {
6100 default: llvm_unreachable("Impossible intrinsic"); // Can't reach here.
6101 case Intrinsic::sqrt: Opcode = ISD::FSQRT; break;
6102 case Intrinsic::fabs: Opcode = ISD::FABS; break;
6103 case Intrinsic::sin: Opcode = ISD::FSIN; break;
6104 case Intrinsic::cos: Opcode = ISD::FCOS; break;
6105 case Intrinsic::floor: Opcode = ISD::FFLOOR; break;
6106 case Intrinsic::ceil: Opcode = ISD::FCEIL; break;
6107 case Intrinsic::trunc: Opcode = ISD::FTRUNC; break;
6108 case Intrinsic::rint: Opcode = ISD::FRINT; break;
6109 case Intrinsic::nearbyint: Opcode = ISD::FNEARBYINT; break;
6110 case Intrinsic::round: Opcode = ISD::FROUND; break;
6111 case Intrinsic::roundeven: Opcode = ISD::FROUNDEVEN; break;
6112 case Intrinsic::canonicalize: Opcode = ISD::FCANONICALIZE; break;
6113 }
6114
6115 setValue(&I, DAG.getNode(Opcode, sdl,
6116 getValue(I.getArgOperand(0)).getValueType(),
6117 getValue(I.getArgOperand(0)), Flags));
6118 return;
6119 }
6120 case Intrinsic::lround:
6121 case Intrinsic::llround:
6122 case Intrinsic::lrint:
6123 case Intrinsic::llrint: {
6124 unsigned Opcode;
6125 switch (Intrinsic) {
6126 default: llvm_unreachable("Impossible intrinsic"); // Can't reach here.
6127 case Intrinsic::lround: Opcode = ISD::LROUND; break;
6128 case Intrinsic::llround: Opcode = ISD::LLROUND; break;
6129 case Intrinsic::lrint: Opcode = ISD::LRINT; break;
6130 case Intrinsic::llrint: Opcode = ISD::LLRINT; break;
6131 }
6132
6133 EVT RetVT = TLI.getValueType(DAG.getDataLayout(), I.getType());
6134 setValue(&I, DAG.getNode(Opcode, sdl, RetVT,
6135 getValue(I.getArgOperand(0))));
6136 return;
6137 }
6138 case Intrinsic::minnum:
6139 setValue(&I, DAG.getNode(ISD::FMINNUM, sdl,
6140 getValue(I.getArgOperand(0)).getValueType(),
6141 getValue(I.getArgOperand(0)),
6142 getValue(I.getArgOperand(1)), Flags));
6143 return;
6144 case Intrinsic::maxnum:
6145 setValue(&I, DAG.getNode(ISD::FMAXNUM, sdl,
6146 getValue(I.getArgOperand(0)).getValueType(),
6147 getValue(I.getArgOperand(0)),
6148 getValue(I.getArgOperand(1)), Flags));
6149 return;
6150 case Intrinsic::minimum:
6151 setValue(&I, DAG.getNode(ISD::FMINIMUM, sdl,
6152 getValue(I.getArgOperand(0)).getValueType(),
6153 getValue(I.getArgOperand(0)),
6154 getValue(I.getArgOperand(1)), Flags));
6155 return;
6156 case Intrinsic::maximum:
6157 setValue(&I, DAG.getNode(ISD::FMAXIMUM, sdl,
6158 getValue(I.getArgOperand(0)).getValueType(),
6159 getValue(I.getArgOperand(0)),
6160 getValue(I.getArgOperand(1)), Flags));
6161 return;
6162 case Intrinsic::copysign:
6163 setValue(&I, DAG.getNode(ISD::FCOPYSIGN, sdl,
6164 getValue(I.getArgOperand(0)).getValueType(),
6165 getValue(I.getArgOperand(0)),
6166 getValue(I.getArgOperand(1)), Flags));
6167 return;
6168 case Intrinsic::fma:
6169 setValue(&I, DAG.getNode(
6170 ISD::FMA, sdl, getValue(I.getArgOperand(0)).getValueType(),
6171 getValue(I.getArgOperand(0)), getValue(I.getArgOperand(1)),
6172 getValue(I.getArgOperand(2)), Flags));
6173 return;
6174#define INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC) \
6175 case Intrinsic::INTRINSIC:
6176#include "llvm/IR/ConstrainedOps.def"
6177 visitConstrainedFPIntrinsic(cast<ConstrainedFPIntrinsic>(I));
6178 return;
6179#define BEGIN_REGISTER_VP_INTRINSIC(VPID, ...) case Intrinsic::VPID:
6180#include "llvm/IR/VPIntrinsics.def"
6181 visitVectorPredicationIntrinsic(cast<VPIntrinsic>(I));
6182 return;
6183 case Intrinsic::fmuladd: {
6184 EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
6185 if (TM.Options.AllowFPOpFusion != FPOpFusion::Strict &&
6186 TLI.isFMAFasterThanFMulAndFAdd(DAG.getMachineFunction(), VT)) {
6187 setValue(&I, DAG.getNode(ISD::FMA, sdl,
6188 getValue(I.getArgOperand(0)).getValueType(),
6189 getValue(I.getArgOperand(0)),
6190 getValue(I.getArgOperand(1)),
6191 getValue(I.getArgOperand(2)), Flags));
6192 } else {
6193 // TODO: Intrinsic calls should have fast-math-flags.
6194 SDValue Mul = DAG.getNode(
6195 ISD::FMUL, sdl, getValue(I.getArgOperand(0)).getValueType(),
6196 getValue(I.getArgOperand(0)), getValue(I.getArgOperand(1)), Flags);
6197 SDValue Add = DAG.getNode(ISD::FADD, sdl,
6198 getValue(I.getArgOperand(0)).getValueType(),
6199 Mul, getValue(I.getArgOperand(2)), Flags);
6200 setValue(&I, Add);
6201 }
6202 return;
6203 }
6204 case Intrinsic::convert_to_fp16:
6205 setValue(&I, DAG.getNode(ISD::BITCAST, sdl, MVT::i16,
6206 DAG.getNode(ISD::FP_ROUND, sdl, MVT::f16,
6207 getValue(I.getArgOperand(0)),
6208 DAG.getTargetConstant(0, sdl,
6209 MVT::i32))));
6210 return;
6211 case Intrinsic::convert_from_fp16:
6212 setValue(&I, DAG.getNode(ISD::FP_EXTEND, sdl,
6213 TLI.getValueType(DAG.getDataLayout(), I.getType()),
6214 DAG.getNode(ISD::BITCAST, sdl, MVT::f16,
6215 getValue(I.getArgOperand(0)))));
6216 return;
6217 case Intrinsic::fptosi_sat: {
6218 EVT Type = TLI.getValueType(DAG.getDataLayout(), I.getType());
6219 SDValue SatW = DAG.getConstant(Type.getScalarSizeInBits(), sdl, MVT::i32);
6220 setValue(&I, DAG.getNode(ISD::FP_TO_SINT_SAT, sdl, Type,
6221 getValue(I.getArgOperand(0)), SatW));
6222 return;
6223 }
6224 case Intrinsic::fptoui_sat: {
6225 EVT Type = TLI.getValueType(DAG.getDataLayout(), I.getType());
6226 SDValue SatW = DAG.getConstant(Type.getScalarSizeInBits(), sdl, MVT::i32);
6227 setValue(&I, DAG.getNode(ISD::FP_TO_UINT_SAT, sdl, Type,
6228 getValue(I.getArgOperand(0)), SatW));
6229 return;
6230 }
6231 case Intrinsic::pcmarker: {
6232 SDValue Tmp = getValue(I.getArgOperand(0));
6233 DAG.setRoot(DAG.getNode(ISD::PCMARKER, sdl, MVT::Other, getRoot(), Tmp));
6234 return;
6235 }
6236 case Intrinsic::readcyclecounter: {
6237 SDValue Op = getRoot();
6238 Res = DAG.getNode(ISD::READCYCLECOUNTER, sdl,
6239 DAG.getVTList(MVT::i64, MVT::Other), Op);
6240 setValue(&I, Res);
6241 DAG.setRoot(Res.getValue(1));
6242 return;
6243 }
6244 case Intrinsic::bitreverse:
6245 setValue(&I, DAG.getNode(ISD::BITREVERSE, sdl,
6246 getValue(I.getArgOperand(0)).getValueType(),
6247 getValue(I.getArgOperand(0))));
6248 return;
6249 case Intrinsic::bswap:
6250 setValue(&I, DAG.getNode(ISD::BSWAP, sdl,
6251 getValue(I.getArgOperand(0)).getValueType(),
6252 getValue(I.getArgOperand(0))));
6253 return;
6254 case Intrinsic::cttz: {
6255 SDValue Arg = getValue(I.getArgOperand(0));
6256 ConstantInt *CI = cast<ConstantInt>(I.getArgOperand(1));
6257 EVT Ty = Arg.getValueType();
6258 setValue(&I, DAG.getNode(CI->isZero() ? ISD::CTTZ : ISD::CTTZ_ZERO_UNDEF,
6259 sdl, Ty, Arg));
6260 return;
6261 }
6262 case Intrinsic::ctlz: {
6263 SDValue Arg = getValue(I.getArgOperand(0));
6264 ConstantInt *CI = cast<ConstantInt>(I.getArgOperand(1));
6265 EVT Ty = Arg.getValueType();
6266 setValue(&I, DAG.getNode(CI->isZero() ? ISD::CTLZ : ISD::CTLZ_ZERO_UNDEF,
6267 sdl, Ty, Arg));
6268 return;
6269 }
6270 case Intrinsic::ctpop: {
6271 SDValue Arg = getValue(I.getArgOperand(0));
6272 EVT Ty = Arg.getValueType();
6273 setValue(&I, DAG.getNode(ISD::CTPOP, sdl, Ty, Arg));
6274 return;
6275 }
6276 case Intrinsic::fshl:
6277 case Intrinsic::fshr: {
6278 bool IsFSHL = Intrinsic == Intrinsic::fshl;
6279 SDValue X = getValue(I.getArgOperand(0));
6280 SDValue Y = getValue(I.getArgOperand(1));
6281 SDValue Z = getValue(I.getArgOperand(2));
6282 EVT VT = X.getValueType();
6283
6284 if (X == Y) {
6285 auto RotateOpcode = IsFSHL ? ISD::ROTL : ISD::ROTR;
6286 setValue(&I, DAG.getNode(RotateOpcode, sdl, VT, X, Z));
6287 } else {
6288 auto FunnelOpcode = IsFSHL ? ISD::FSHL : ISD::FSHR;
6289 setValue(&I, DAG.getNode(FunnelOpcode, sdl, VT, X, Y, Z));
6290 }
6291 return;
6292 }
6293 case Intrinsic::sadd_sat: {
6294 SDValue Op1 = getValue(I.getArgOperand(0));
6295 SDValue Op2 = getValue(I.getArgOperand(1));
6296 setValue(&I, DAG.getNode(ISD::SADDSAT, sdl, Op1.getValueType(), Op1, Op2));
6297 return;
6298 }
6299 case Intrinsic::uadd_sat: {
6300 SDValue Op1 = getValue(I.getArgOperand(0));
6301 SDValue Op2 = getValue(I.getArgOperand(1));
6302 setValue(&I, DAG.getNode(ISD::UADDSAT, sdl, Op1.getValueType(), Op1, Op2));
6303 return;
6304 }
6305 case Intrinsic::ssub_sat: {
6306 SDValue Op1 = getValue(I.getArgOperand(0));
6307 SDValue Op2 = getValue(I.getArgOperand(1));
6308 setValue(&I, DAG.getNode(ISD::SSUBSAT, sdl, Op1.getValueType(), Op1, Op2));
6309 return;
6310 }
6311 case Intrinsic::usub_sat: {
6312 SDValue Op1 = getValue(I.getArgOperand(0));
6313 SDValue Op2 = getValue(I.getArgOperand(1));
6314 setValue(&I, DAG.getNode(ISD::USUBSAT, sdl, Op1.getValueType(), Op1, Op2));
6315 return;
6316 }
6317 case Intrinsic::sshl_sat: {
6318 SDValue Op1 = getValue(I.getArgOperand(0));
6319 SDValue Op2 = getValue(I.getArgOperand(1));
6320 setValue(&I, DAG.getNode(ISD::SSHLSAT, sdl, Op1.getValueType(), Op1, Op2));
6321 return;
6322 }
6323 case Intrinsic::ushl_sat: {
6324 SDValue Op1 = getValue(I.getArgOperand(0));
6325 SDValue Op2 = getValue(I.getArgOperand(1));
6326 setValue(&I, DAG.getNode(ISD::USHLSAT, sdl, Op1.getValueType(), Op1, Op2));
6327 return;
6328 }
6329 case Intrinsic::smul_fix:
6330 case Intrinsic::umul_fix:
6331 case Intrinsic::smul_fix_sat:
6332 case Intrinsic::umul_fix_sat: {
6333 SDValue Op1 = getValue(I.getArgOperand(0));
6334 SDValue Op2 = getValue(I.getArgOperand(1));
6335 SDValue Op3 = getValue(I.getArgOperand(2));
6336 setValue(&I, DAG.getNode(FixedPointIntrinsicToOpcode(Intrinsic), sdl,
6337 Op1.getValueType(), Op1, Op2, Op3));
6338 return;
6339 }
6340 case Intrinsic::sdiv_fix:
6341 case Intrinsic::udiv_fix:
6342 case Intrinsic::sdiv_fix_sat:
6343 case Intrinsic::udiv_fix_sat: {
6344 SDValue Op1 = getValue(I.getArgOperand(0));
6345 SDValue Op2 = getValue(I.getArgOperand(1));
6346 SDValue Op3 = getValue(I.getArgOperand(2));
6347 setValue(&I, expandDivFix(FixedPointIntrinsicToOpcode(Intrinsic), sdl,
6348 Op1, Op2, Op3, DAG, TLI));
6349 return;
6350 }
6351 case Intrinsic::smax: {
6352 SDValue Op1 = getValue(I.getArgOperand(0));
6353 SDValue Op2 = getValue(I.getArgOperand(1));
6354 setValue(&I, DAG.getNode(ISD::SMAX, sdl, Op1.getValueType(), Op1, Op2));
6355 return;
6356 }
6357 case Intrinsic::smin: {
6358 SDValue Op1 = getValue(I.getArgOperand(0));
6359 SDValue Op2 = getValue(I.getArgOperand(1));
6360 setValue(&I, DAG.getNode(ISD::SMIN, sdl, Op1.getValueType(), Op1, Op2));
6361 return;
6362 }
6363 case Intrinsic::umax: {
6364 SDValue Op1 = getValue(I.getArgOperand(0));
6365 SDValue Op2 = getValue(I.getArgOperand(1));
6366 setValue(&I, DAG.getNode(ISD::UMAX, sdl, Op1.getValueType(), Op1, Op2));
6367 return;
6368 }
6369 case Intrinsic::umin: {
6370 SDValue Op1 = getValue(I.getArgOperand(0));
6371 SDValue Op2 = getValue(I.getArgOperand(1));
6372 setValue(&I, DAG.getNode(ISD::UMIN, sdl, Op1.getValueType(), Op1, Op2));
6373 return;
6374 }
6375 case Intrinsic::abs: {
6376 // TODO: Preserve "int min is poison" arg in SDAG?
6377 SDValue Op1 = getValue(I.getArgOperand(0));
6378 setValue(&I, DAG.getNode(ISD::ABS, sdl, Op1.getValueType(), Op1));
6379 return;
6380 }
6381 case Intrinsic::stacksave: {
6382 SDValue Op = getRoot();
6383 EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
6384 Res = DAG.getNode(ISD::STACKSAVE, sdl, DAG.getVTList(VT, MVT::Other), Op);
6385 setValue(&I, Res);
6386 DAG.setRoot(Res.getValue(1));
6387 return;
6388 }
6389 case Intrinsic::stackrestore:
6390 Res = getValue(I.getArgOperand(0));
6391 DAG.setRoot(DAG.getNode(ISD::STACKRESTORE, sdl, MVT::Other, getRoot(), Res));
6392 return;
6393 case Intrinsic::get_dynamic_area_offset: {
6394 SDValue Op = getRoot();
6395 EVT PtrTy = TLI.getFrameIndexTy(DAG.getDataLayout());
6396 EVT ResTy = TLI.getValueType(DAG.getDataLayout(), I.getType());
6397 // Result type for @llvm.get.dynamic.area.offset should match PtrTy for
6398 // target.
6399 if (PtrTy.getFixedSizeInBits() < ResTy.getFixedSizeInBits())
6400 report_fatal_error("Wrong result type for @llvm.get.dynamic.area.offset"
6401 " intrinsic!");
6402 Res = DAG.getNode(ISD::GET_DYNAMIC_AREA_OFFSET, sdl, DAG.getVTList(ResTy),
6403 Op);
6404 DAG.setRoot(Op);
6405 setValue(&I, Res);
6406 return;
6407 }
6408 case Intrinsic::stackguard: {
6409 MachineFunction &MF = DAG.getMachineFunction();
6410 const Module &M = *MF.getFunction().getParent();
6411 SDValue Chain = getRoot();
6412 if (TLI.useLoadStackGuardNode()) {
6413 Res = getLoadStackGuard(DAG, sdl, Chain);
6414 } else {
6415 EVT PtrTy = TLI.getValueType(DAG.getDataLayout(), I.getType());
6416 const Value *Global = TLI.getSDagStackGuard(M);
6417 Align Align = DL->getPrefTypeAlign(Global->getType());
6418 Res = DAG.getLoad(PtrTy, sdl, Chain, getValue(Global),
6419 MachinePointerInfo(Global, 0), Align,
6420 MachineMemOperand::MOVolatile);
6421 }
6422 if (TLI.useStackGuardXorFP())
6423 Res = TLI.emitStackGuardXorFP(DAG, Res, sdl);
6424 DAG.setRoot(Chain);
6425 setValue(&I, Res);
6426 return;
6427 }
6428 case Intrinsic::stackprotector: {
6429 // Emit code into the DAG to store the stack guard onto the stack.
6430 MachineFunction &MF = DAG.getMachineFunction();
6431 MachineFrameInfo &MFI = MF.getFrameInfo();
6432 SDValue Src, Chain = getRoot();
6433
6434 if (TLI.useLoadStackGuardNode())
6435 Src = getLoadStackGuard(DAG, sdl, Chain);
6436 else
6437 Src = getValue(I.getArgOperand(0)); // The guard's value.
6438
6439 AllocaInst *Slot = cast<AllocaInst>(I.getArgOperand(1));
6440
6441 int FI = FuncInfo.StaticAllocaMap[Slot];
6442 MFI.setStackProtectorIndex(FI);
6443 EVT PtrTy = TLI.getFrameIndexTy(DAG.getDataLayout());
6444
6445 SDValue FIN = DAG.getFrameIndex(FI, PtrTy);
6446
6447 // Store the stack protector onto the stack.
6448 Res = DAG.getStore(
6449 Chain, sdl, Src, FIN,
6450 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI),
6451 MaybeAlign(), MachineMemOperand::MOVolatile);
6452 setValue(&I, Res);
6453 DAG.setRoot(Res);
6454 return;
6455 }
6456 case Intrinsic::objectsize:
6457 llvm_unreachable("llvm.objectsize.* should have been lowered already");
6458
6459 case Intrinsic::is_constant:
6460 llvm_unreachable("llvm.is.constant.* should have been lowered already");
6461
6462 case Intrinsic::annotation:
6463 case Intrinsic::ptr_annotation:
6464 case Intrinsic::launder_invariant_group:
6465 case Intrinsic::strip_invariant_group:
6466 // Drop the intrinsic, but forward the value
6467 setValue(&I, getValue(I.getOperand(0)));
6468 return;
6469
6470 case Intrinsic::assume:
6471 case Intrinsic::experimental_noalias_scope_decl:
6472 case Intrinsic::var_annotation:
6473 case Intrinsic::sideeffect:
6474 // Discard annotate attributes, noalias scope declarations, assumptions, and
6475 // artificial side-effects.
6476 return;
6477
6478 case Intrinsic::codeview_annotation: {
6479 // Emit a label associated with this metadata.
6480 MachineFunction &MF = DAG.getMachineFunction();
6481 MCSymbol *Label =
6482 MF.getMMI().getContext().createTempSymbol("annotation", true);
6483 Metadata *MD = cast<MetadataAsValue>(I.getArgOperand(0))->getMetadata();
6484 MF.addCodeViewAnnotation(Label, cast<MDNode>(MD));
6485 Res = DAG.getLabelNode(ISD::ANNOTATION_LABEL, sdl, getRoot(), Label);
6486 DAG.setRoot(Res);
6487 return;
6488 }
6489
6490 case Intrinsic::init_trampoline: {
6491 const Function *F = cast<Function>(I.getArgOperand(1)->stripPointerCasts());
6492
6493 SDValue Ops[6];
6494 Ops[0] = getRoot();
6495 Ops[1] = getValue(I.getArgOperand(0));
6496 Ops[2] = getValue(I.getArgOperand(1));
6497 Ops[3] = getValue(I.getArgOperand(2));
6498 Ops[4] = DAG.getSrcValue(I.getArgOperand(0));
6499 Ops[5] = DAG.getSrcValue(F);
6500
6501 Res = DAG.getNode(ISD::INIT_TRAMPOLINE, sdl, MVT::Other, Ops);
6502
6503 DAG.setRoot(Res);
6504 return;
6505 }
6506 case Intrinsic::adjust_trampoline:
6507 setValue(&I, DAG.getNode(ISD::ADJUST_TRAMPOLINE, sdl,
6508 TLI.getPointerTy(DAG.getDataLayout()),
6509 getValue(I.getArgOperand(0))));
6510 return;
6511 case Intrinsic::gcroot: {
6512 assert(DAG.getMachineFunction().getFunction().hasGC() &&
6513 "only valid in functions with gc specified, enforced by Verifier");
6514 assert(GFI && "implied by previous");
6515 const Value *Alloca = I.getArgOperand(0)->stripPointerCasts();
6516 const Constant *TypeMap = cast<Constant>(I.getArgOperand(1));
6517
6518 FrameIndexSDNode *FI = cast<FrameIndexSDNode>(getValue(Alloca).getNode());
6519 GFI->addStackRoot(FI->getIndex(), TypeMap);
6520 return;
6521 }
6522 case Intrinsic::gcread:
6523 case Intrinsic::gcwrite:
6524 llvm_unreachable("GC failed to lower gcread/gcwrite intrinsics!");
6525 case Intrinsic::flt_rounds:
6526 Res = DAG.getNode(ISD::FLT_ROUNDS_, sdl, {MVT::i32, MVT::Other}, getRoot());
6527 setValue(&I, Res);
6528 DAG.setRoot(Res.getValue(1));
6529 return;
6530
6531 case Intrinsic::expect:
6532 // Just replace __builtin_expect(exp, c) with EXP.
6533 setValue(&I, getValue(I.getArgOperand(0)));
6534 return;
6535
6536 case Intrinsic::ubsantrap:
6537 case Intrinsic::debugtrap:
6538 case Intrinsic::trap: {
6539 StringRef TrapFuncName =
6540 I.getAttributes()
6541 .getAttribute(AttributeList::FunctionIndex, "trap-func-name")
6542 .getValueAsString();
6543 if (TrapFuncName.empty()) {
6544 switch (Intrinsic) {
6545 case Intrinsic::trap:
6546 DAG.setRoot(DAG.getNode(ISD::TRAP, sdl, MVT::Other, getRoot()));
6547 break;
6548 case Intrinsic::debugtrap:
6549 DAG.setRoot(DAG.getNode(ISD::DEBUGTRAP, sdl, MVT::Other, getRoot()));
6550 break;
6551 case Intrinsic::ubsantrap:
6552 DAG.setRoot(DAG.getNode(
6553 ISD::UBSANTRAP, sdl, MVT::Other, getRoot(),
6554 DAG.getTargetConstant(
6555 cast<ConstantInt>(I.getArgOperand(0))->getZExtValue(), sdl,
6556 MVT::i32)));
6557 break;
6558 default: llvm_unreachable("unknown trap intrinsic");
6559 }
6560 return;
6561 }
6562 TargetLowering::ArgListTy Args;
6563 if (Intrinsic == Intrinsic::ubsantrap) {
6564 Args.push_back(TargetLoweringBase::ArgListEntry());
6565 Args[0].Val = I.getArgOperand(0);
6566 Args[0].Node = getValue(Args[0].Val);
6567 Args[0].Ty = Args[0].Val->getType();
6568 }
6569
6570 TargetLowering::CallLoweringInfo CLI(DAG);
6571 CLI.setDebugLoc(sdl).setChain(getRoot()).setLibCallee(
6572 CallingConv::C, I.getType(),
6573 DAG.getExternalSymbol(TrapFuncName.data(),
6574 TLI.getPointerTy(DAG.getDataLayout())),
6575 std::move(Args));
6576
6577 std::pair<SDValue, SDValue> Result = TLI.LowerCallTo(CLI);
6578 DAG.setRoot(Result.second);
6579 return;
6580 }
6581
6582 case Intrinsic::uadd_with_overflow:
6583 case Intrinsic::sadd_with_overflow:
6584 case Intrinsic::usub_with_overflow:
6585 case Intrinsic::ssub_with_overflow:
6586 case Intrinsic::umul_with_overflow:
6587 case Intrinsic::smul_with_overflow: {
6588 ISD::NodeType Op;
6589 switch (Intrinsic) {
6590 default: llvm_unreachable("Impossible intrinsic"); // Can't reach here.
6591 case Intrinsic::uadd_with_overflow: Op = ISD::UADDO; break;
6592 case Intrinsic::sadd_with_overflow: Op = ISD::SADDO; break;
6593 case Intrinsic::usub_with_overflow: Op = ISD::USUBO; break;
6594 case Intrinsic::ssub_with_overflow: Op = ISD::SSUBO; break;
6595 case Intrinsic::umul_with_overflow: Op = ISD::UMULO; break;
6596 case Intrinsic::smul_with_overflow: Op = ISD::SMULO; break;
6597 }
6598 SDValue Op1 = getValue(I.getArgOperand(0));
6599 SDValue Op2 = getValue(I.getArgOperand(1));
6600
6601 EVT ResultVT = Op1.getValueType();
6602 EVT OverflowVT = MVT::i1;
6603 if (ResultVT.isVector())
6604 OverflowVT = EVT::getVectorVT(
6605 *Context, OverflowVT, ResultVT.getVectorElementCount());
6606
6607 SDVTList VTs = DAG.getVTList(ResultVT, OverflowVT);
6608 setValue(&I, DAG.getNode(Op, sdl, VTs, Op1, Op2));
6609 return;
6610 }
6611 case Intrinsic::prefetch: {
6612 SDValue Ops[5];
6613 unsigned rw = cast<ConstantInt>(I.getArgOperand(1))->getZExtValue();
6614 auto Flags = rw == 0 ? MachineMemOperand::MOLoad :MachineMemOperand::MOStore;
6615 Ops[0] = DAG.getRoot();
6616 Ops[1] = getValue(I.getArgOperand(0));
6617 Ops[2] = getValue(I.getArgOperand(1));
6618 Ops[3] = getValue(I.getArgOperand(2));
6619 Ops[4] = getValue(I.getArgOperand(3));
6620 SDValue Result = DAG.getMemIntrinsicNode(
6621 ISD::PREFETCH, sdl, DAG.getVTList(MVT::Other), Ops,
6622 EVT::getIntegerVT(*Context, 8), MachinePointerInfo(I.getArgOperand(0)),
6623 /* align */ None, Flags);
6624
6625 // Chain the prefetch in parallell with any pending loads, to stay out of
6626 // the way of later optimizations.
6627 PendingLoads.push_back(Result);
6628 Result = getRoot();
6629 DAG.setRoot(Result);
6630 return;
6631 }
6632 case Intrinsic::lifetime_start:
6633 case Intrinsic::lifetime_end: {
6634 bool IsStart = (Intrinsic == Intrinsic::lifetime_start);
6635 // Stack coloring is not enabled in O0, discard region information.
6636 if (TM.getOptLevel() == CodeGenOpt::None)
6637 return;
6638
6639 const int64_t ObjectSize =
6640 cast<ConstantInt>(I.getArgOperand(0))->getSExtValue();
6641 Value *const ObjectPtr = I.getArgOperand(1);
6642 SmallVector<const Value *, 4> Allocas;
6643 getUnderlyingObjects(ObjectPtr, Allocas);
6644
6645 for (SmallVectorImpl<const Value*>::iterator Object = Allocas.begin(),
6646 E = Allocas.end(); Object != E; ++Object) {
6647 const AllocaInst *LifetimeObject = dyn_cast_or_null<AllocaInst>(*Object);
6648
6649 // Could not find an Alloca.
6650 if (!LifetimeObject)
6651 continue;
6652
6653 // First check that the Alloca is static, otherwise it won't have a
6654 // valid frame index.
6655 auto SI = FuncInfo.StaticAllocaMap.find(LifetimeObject);
6656 if (SI == FuncInfo.StaticAllocaMap.end())
6657 return;
6658
6659 const int FrameIndex = SI->second;
6660 int64_t Offset;
6661 if (GetPointerBaseWithConstantOffset(
6662 ObjectPtr, Offset, DAG.getDataLayout()) != LifetimeObject)
6663 Offset = -1; // Cannot determine offset from alloca to lifetime object.
6664 Res = DAG.getLifetimeNode(IsStart, sdl, getRoot(), FrameIndex, ObjectSize,
6665 Offset);
6666 DAG.setRoot(Res);
6667 }
6668 return;
6669 }
6670 case Intrinsic::pseudoprobe: {
6671 auto Guid = cast<ConstantInt>(I.getArgOperand(0))->getZExtValue();
6672 auto Index = cast<ConstantInt>(I.getArgOperand(1))->getZExtValue();
6673 auto Attr = cast<ConstantInt>(I.getArgOperand(2))->getZExtValue();
6674 Res = DAG.getPseudoProbeNode(sdl, getRoot(), Guid, Index, Attr);
6675 DAG.setRoot(Res);
6676 return;
6677 }
6678 case Intrinsic::invariant_start:
6679 // Discard region information.
6680 setValue(&I, DAG.getUNDEF(TLI.getPointerTy(DAG.getDataLayout())));
6681 return;
6682 case Intrinsic::invariant_end:
6683 // Discard region information.
6684 return;
6685 case Intrinsic::clear_cache:
6686 /// FunctionName may be null.
6687 if (const char *FunctionName = TLI.getClearCacheBuiltinName())
6688 lowerCallToExternalSymbol(I, FunctionName);
6689 return;
6690 case Intrinsic::donothing:
6691 // ignore
6692 return;
6693 case Intrinsic::experimental_stackmap:
6694 visitStackmap(I);
6695 return;
6696 case Intrinsic::experimental_patchpoint_void:
6697 case Intrinsic::experimental_patchpoint_i64:
6698 visitPatchpoint(I);
6699 return;
6700 case Intrinsic::experimental_gc_statepoint:
6701 LowerStatepoint(cast<GCStatepointInst>(I));
6702 return;
6703 case Intrinsic::experimental_gc_result:
6704 visitGCResult(cast<GCResultInst>(I));
6705 return;
6706 case Intrinsic::experimental_gc_relocate:
6707 visitGCRelocate(cast<GCRelocateInst>(I));
6708 return;
6709 case Intrinsic::instrprof_increment:
6710 llvm_unreachable("instrprof failed to lower an increment");
6711 case Intrinsic::instrprof_value_profile:
6712 llvm_unreachable("instrprof failed to lower a value profiling call");
6713 case Intrinsic::localescape: {
6714 MachineFunction &MF = DAG.getMachineFunction();
6715 const TargetInstrInfo *TII = DAG.getSubtarget().getInstrInfo();
6716
6717 // Directly emit some LOCAL_ESCAPE machine instrs. Label assignment emission
6718 // is the same on all targets.
6719 for (unsigned Idx = 0, E = I.getNumArgOperands(); Idx < E; ++Idx) {
6720 Value *Arg = I.getArgOperand(Idx)->stripPointerCasts();
6721 if (isa<ConstantPointerNull>(Arg))
6722 continue; // Skip null pointers. They represent a hole in index space.
6723 AllocaInst *Slot = cast<AllocaInst>(Arg);
6724 assert(FuncInfo.StaticAllocaMap.count(Slot) &&
6725 "can only escape static allocas");
6726 int FI = FuncInfo.StaticAllocaMap[Slot];
6727 MCSymbol *FrameAllocSym =
6728 MF.getMMI().getContext().getOrCreateFrameAllocSymbol(
6729 GlobalValue::dropLLVMManglingEscape(MF.getName()), Idx);
6730 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, dl,
6731 TII->get(TargetOpcode::LOCAL_ESCAPE))
6732 .addSym(FrameAllocSym)
6733 .addFrameIndex(FI);
6734 }
6735
6736 return;
6737 }
6738
6739 case Intrinsic::localrecover: {
6740 // i8* @llvm.localrecover(i8* %fn, i8* %fp, i32 %idx)
6741 MachineFunction &MF = DAG.getMachineFunction();
6742
6743 // Get the symbol that defines the frame offset.
6744 auto *Fn = cast<Function>(I.getArgOperand(0)->stripPointerCasts());
6745 auto *Idx = cast<ConstantInt>(I.getArgOperand(2));
6746 unsigned IdxVal =
6747 unsigned(Idx->getLimitedValue(std::numeric_limits<int>::max()));
6748 MCSymbol *FrameAllocSym =
6749 MF.getMMI().getContext().getOrCreateFrameAllocSymbol(
6750 GlobalValue::dropLLVMManglingEscape(Fn->getName()), IdxVal);
6751
6752 Value *FP = I.getArgOperand(1);
6753 SDValue FPVal = getValue(FP);
6754 EVT PtrVT = FPVal.getValueType();
6755
6756 // Create a MCSymbol for the label to avoid any target lowering
6757 // that would make this PC relative.
6758 SDValue OffsetSym = DAG.getMCSymbol(FrameAllocSym, PtrVT);
6759 SDValue OffsetVal =
6760 DAG.getNode(ISD::LOCAL_RECOVER, sdl, PtrVT, OffsetSym);
6761
6762 // Add the offset to the FP.
6763 SDValue Add = DAG.getMemBasePlusOffset(FPVal, OffsetVal, sdl);
6764 setValue(&I, Add);
6765
6766 return;
6767 }
6768
6769 case Intrinsic::eh_exceptionpointer:
6770 case Intrinsic::eh_exceptioncode: {
6771 // Get the exception pointer vreg, copy from it, and resize it to fit.
6772 const auto *CPI = cast<CatchPadInst>(I.getArgOperand(0));
6773 MVT PtrVT = TLI.getPointerTy(DAG.getDataLayout());
6774 const TargetRegisterClass *PtrRC = TLI.getRegClassFor(PtrVT);
6775 unsigned VReg = FuncInfo.getCatchPadExceptionPointerVReg(CPI, PtrRC);
6776 SDValue N =
6777 DAG.getCopyFromReg(DAG.getEntryNode(), getCurSDLoc(), VReg, PtrVT);
6778 if (Intrinsic == Intrinsic::eh_exceptioncode)
6779 N = DAG.getZExtOrTrunc(N, getCurSDLoc(), MVT::i32);
6780 setValue(&I, N);
6781 return;
6782 }
6783 case Intrinsic::xray_customevent: {
6784 // Here we want to make sure that the intrinsic behaves as if it has a
6785 // specific calling convention, and only for x86_64.
6786 // FIXME: Support other platforms later.
6787 const auto &Triple = DAG.getTarget().getTargetTriple();
6788 if (Triple.getArch() != Triple::x86_64)
6789 return;
6790
6791 SDLoc DL = getCurSDLoc();
6792 SmallVector<SDValue, 8> Ops;
6793
6794 // We want to say that we always want the arguments in registers.
6795 SDValue LogEntryVal = getValue(I.getArgOperand(0));
6796 SDValue StrSizeVal = getValue(I.getArgOperand(1));
6797 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
6798 SDValue Chain = getRoot();
6799 Ops.push_back(LogEntryVal);
6800 Ops.push_back(StrSizeVal);
6801 Ops.push_back(Chain);
6802
6803 // We need to enforce the calling convention for the callsite, so that
6804 // argument ordering is enforced correctly, and that register allocation can
6805 // see that some registers may be assumed clobbered and have to preserve
6806 // them across calls to the intrinsic.
6807 MachineSDNode *MN = DAG.getMachineNode(TargetOpcode::PATCHABLE_EVENT_CALL,
6808 DL, NodeTys, Ops);
6809 SDValue patchableNode = SDValue(MN, 0);
6810 DAG.setRoot(patchableNode);
6811 setValue(&I, patchableNode);
6812 return;
6813 }
6814 case Intrinsic::xray_typedevent: {
6815 // Here we want to make sure that the intrinsic behaves as if it has a
6816 // specific calling convention, and only for x86_64.
6817 // FIXME: Support other platforms later.
6818 const auto &Triple = DAG.getTarget().getTargetTriple();
6819 if (Triple.getArch() != Triple::x86_64)
6820 return;
6821
6822 SDLoc DL = getCurSDLoc();
6823 SmallVector<SDValue, 8> Ops;
6824
6825 // We want to say that we always want the arguments in registers.
6826 // It's unclear to me how manipulating the selection DAG here forces callers
6827 // to provide arguments in registers instead of on the stack.
6828 SDValue LogTypeId = getValue(I.getArgOperand(0));
6829 SDValue LogEntryVal = getValue(I.getArgOperand(1));
6830 SDValue StrSizeVal = getValue(I.getArgOperand(2));
6831 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
6832 SDValue Chain = getRoot();
6833 Ops.push_back(LogTypeId);
6834 Ops.push_back(LogEntryVal);
6835 Ops.push_back(StrSizeVal);
6836 Ops.push_back(Chain);
6837
6838 // We need to enforce the calling convention for the callsite, so that
6839 // argument ordering is enforced correctly, and that register allocation can
6840 // see that some registers may be assumed clobbered and have to preserve
6841 // them across calls to the intrinsic.
6842 MachineSDNode *MN = DAG.getMachineNode(
6843 TargetOpcode::PATCHABLE_TYPED_EVENT_CALL, DL, NodeTys, Ops);
6844 SDValue patchableNode = SDValue(MN, 0);
6845 DAG.setRoot(patchableNode);
6846 setValue(&I, patchableNode);
6847 return;
6848 }
6849 case Intrinsic::experimental_deoptimize:
6850 LowerDeoptimizeCall(&I);
6851 return;
6852
6853 case Intrinsic::vector_reduce_fadd:
6854 case Intrinsic::vector_reduce_fmul:
6855 case Intrinsic::vector_reduce_add:
6856 case Intrinsic::vector_reduce_mul:
6857 case Intrinsic::vector_reduce_and:
6858 case Intrinsic::vector_reduce_or:
6859 case Intrinsic::vector_reduce_xor:
6860 case Intrinsic::vector_reduce_smax:
6861 case Intrinsic::vector_reduce_smin:
6862 case Intrinsic::vector_reduce_umax:
6863 case Intrinsic::vector_reduce_umin:
6864 case Intrinsic::vector_reduce_fmax:
6865 case Intrinsic::vector_reduce_fmin:
6866 visitVectorReduce(I, Intrinsic);
6867 return;
6868
6869 case Intrinsic::icall_branch_funnel: {
6870 SmallVector<SDValue, 16> Ops;
6871 Ops.push_back(getValue(I.getArgOperand(0)));
6872
6873 int64_t Offset;
6874 auto *Base = dyn_cast<GlobalObject>(GetPointerBaseWithConstantOffset(
6875 I.getArgOperand(1), Offset, DAG.getDataLayout()));
6876 if (!Base)
6877 report_fatal_error(
6878 "llvm.icall.branch.funnel operand must be a GlobalValue");
6879 Ops.push_back(DAG.getTargetGlobalAddress(Base, getCurSDLoc(), MVT::i64, 0));
6880
6881 struct BranchFunnelTarget {
6882 int64_t Offset;
6883 SDValue Target;
6884 };
6885 SmallVector<BranchFunnelTarget, 8> Targets;
6886
6887 for (unsigned Op = 1, N = I.getNumArgOperands(); Op != N; Op += 2) {
6888 auto *ElemBase = dyn_cast<GlobalObject>(GetPointerBaseWithConstantOffset(
6889 I.getArgOperand(Op), Offset, DAG.getDataLayout()));
6890 if (ElemBase != Base)
6891 report_fatal_error("all llvm.icall.branch.funnel operands must refer "
6892 "to the same GlobalValue");
6893
6894 SDValue Val = getValue(I.getArgOperand(Op + 1));
6895 auto *GA = dyn_cast<GlobalAddressSDNode>(Val);
6896 if (!GA)
6897 report_fatal_error(
6898 "llvm.icall.branch.funnel operand must be a GlobalValue");
6899 Targets.push_back({Offset, DAG.getTargetGlobalAddress(
6900 GA->getGlobal(), getCurSDLoc(),
6901 Val.getValueType(), GA->getOffset())});
6902 }
6903 llvm::sort(Targets,
6904 [](const BranchFunnelTarget &T1, const BranchFunnelTarget &T2) {
6905 return T1.Offset < T2.Offset;
6906 });
6907
6908 for (auto &T : Targets) {
6909 Ops.push_back(DAG.getTargetConstant(T.Offset, getCurSDLoc(), MVT::i32));
6910 Ops.push_back(T.Target);
6911 }
6912
6913 Ops.push_back(DAG.getRoot()); // Chain
6914 SDValue N(DAG.getMachineNode(TargetOpcode::ICALL_BRANCH_FUNNEL,
6915 getCurSDLoc(), MVT::Other, Ops),
6916 0);
6917 DAG.setRoot(N);
6918 setValue(&I, N);
6919 HasTailCall = true;
6920 return;
6921 }
6922
6923 case Intrinsic::wasm_landingpad_index:
6924 // Information this intrinsic contained has been transferred to
6925 // MachineFunction in SelectionDAGISel::PrepareEHLandingPad. We can safely
6926 // delete it now.
6927 return;
6928
6929 case Intrinsic::aarch64_settag:
6930 case Intrinsic::aarch64_settag_zero: {
6931 const SelectionDAGTargetInfo &TSI = DAG.getSelectionDAGInfo();
6932 bool ZeroMemory = Intrinsic == Intrinsic::aarch64_settag_zero;
6933 SDValue Val = TSI.EmitTargetCodeForSetTag(
6934 DAG, getCurSDLoc(), getRoot(), getValue(I.getArgOperand(0)),
6935 getValue(I.getArgOperand(1)), MachinePointerInfo(I.getArgOperand(0)),
6936 ZeroMemory);
6937 DAG.setRoot(Val);
6938 setValue(&I, Val);
6939 return;
6940 }
6941 case Intrinsic::ptrmask: {
6942 SDValue Ptr = getValue(I.getOperand(0));
6943 SDValue Const = getValue(I.getOperand(1));
6944
6945 EVT PtrVT = Ptr.getValueType();
6946 setValue(&I, DAG.getNode(ISD::AND, getCurSDLoc(), PtrVT, Ptr,
6947 DAG.getZExtOrTrunc(Const, getCurSDLoc(), PtrVT)));
6948 return;
6949 }
6950 case Intrinsic::get_active_lane_mask: {
6951 auto DL = getCurSDLoc();
6952 SDValue Index = getValue(I.getOperand(0));
6953 SDValue TripCount = getValue(I.getOperand(1));
6954 Type *ElementTy = I.getOperand(0)->getType();
6955 EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
6956 unsigned VecWidth = VT.getVectorNumElements();
6957
6958 SmallVector<SDValue, 16> OpsTripCount;
6959 SmallVector<SDValue, 16> OpsIndex;
6960 SmallVector<SDValue, 16> OpsStepConstants;
6961 for (unsigned i = 0; i < VecWidth; i++) {
6962 OpsTripCount.push_back(TripCount);
6963 OpsIndex.push_back(Index);
6964 OpsStepConstants.push_back(
6965 DAG.getConstant(i, DL, EVT::getEVT(ElementTy)));
6966 }
6967
6968 EVT CCVT = EVT::getVectorVT(I.getContext(), MVT::i1, VecWidth);
6969
6970 auto VecTy = EVT::getEVT(FixedVectorType::get(ElementTy, VecWidth));
6971 SDValue VectorIndex = DAG.getBuildVector(VecTy, DL, OpsIndex);
6972 SDValue VectorStep = DAG.getBuildVector(VecTy, DL, OpsStepConstants);
6973 SDValue VectorInduction = DAG.getNode(
6974 ISD::UADDO, DL, DAG.getVTList(VecTy, CCVT), VectorIndex, VectorStep);
6975 SDValue VectorTripCount = DAG.getBuildVector(VecTy, DL, OpsTripCount);
6976 SDValue SetCC = DAG.getSetCC(DL, CCVT, VectorInduction.getValue(0),
6977 VectorTripCount, ISD::CondCode::SETULT);
6978 setValue(&I, DAG.getNode(ISD::AND, DL, CCVT,
6979 DAG.getNOT(DL, VectorInduction.getValue(1), CCVT),
6980 SetCC));
6981 return;
6982 }
6983 case Intrinsic::experimental_vector_insert: {
6984 auto DL = getCurSDLoc();
6985
6986 SDValue Vec = getValue(I.getOperand(0));
6987 SDValue SubVec = getValue(I.getOperand(1));
6988 SDValue Index = getValue(I.getOperand(2));
6989 EVT ResultVT = TLI.getValueType(DAG.getDataLayout(), I.getType());
6990 setValue(&I, DAG.getNode(ISD::INSERT_SUBVECTOR, DL, ResultVT, Vec, SubVec,
6991 Index));
6992 return;
6993 }
6994 case Intrinsic::experimental_vector_extract: {
6995 auto DL = getCurSDLoc();
6996
6997 SDValue Vec = getValue(I.getOperand(0));
6998 SDValue Index = getValue(I.getOperand(1));
6999 EVT ResultVT = TLI.getValueType(DAG.getDataLayout(), I.getType());
7000
7001 setValue(&I, DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, ResultVT, Vec, Index));
7002 return;
7003 }
7004 }
7005}
7006
7007void SelectionDAGBuilder::visitConstrainedFPIntrinsic(
7008 const ConstrainedFPIntrinsic &FPI) {
7009 SDLoc sdl = getCurSDLoc();
7010
7011 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
7012 SmallVector<EVT, 4> ValueVTs;
7013 ComputeValueVTs(TLI, DAG.getDataLayout(), FPI.getType(), ValueVTs);
7014 ValueVTs.push_back(MVT::Other); // Out chain
7015
7016 // We do not need to serialize constrained FP intrinsics against
7017 // each other or against (nonvolatile) loads, so they can be
7018 // chained like loads.
7019 SDValue Chain = DAG.getRoot();
7020 SmallVector<SDValue, 4> Opers;
7021 Opers.push_back(Chain);
7022 if (FPI.isUnaryOp()) {
7023 Opers.push_back(getValue(FPI.getArgOperand(0)));
7024 } else if (FPI.isTernaryOp()) {
7025 Opers.push_back(getValue(FPI.getArgOperand(0)));
7026 Opers.push_back(getValue(FPI.getArgOperand(1)));
7027 Opers.push_back(getValue(FPI.getArgOperand(2)));
7028 } else {
7029 Opers.push_back(getValue(FPI.getArgOperand(0)));
7030 Opers.push_back(getValue(FPI.getArgOperand(1)));
7031 }
7032
7033 auto pushOutChain = [this](SDValue Result, fp::ExceptionBehavior EB) {
7034 assert(Result.getNode()->getNumValues() == 2);
7035
7036 // Push node to the appropriate list so that future instructions can be
7037 // chained up correctly.
7038 SDValue OutChain = Result.getValue(1);
7039 switch (EB) {
7040 case fp::ExceptionBehavior::ebIgnore:
7041 // The only reason why ebIgnore nodes still need to be chained is that
7042 // they might depend on the current rounding mode, and therefore must
7043 // not be moved across instruction that may change that mode.
7044 LLVM_FALLTHROUGH;
7045 case fp::ExceptionBehavior::ebMayTrap:
7046 // These must not be moved across calls or instructions that may change
7047 // floating-point exception masks.
7048 PendingConstrainedFP.push_back(OutChain);
7049 break;
7050 case fp::ExceptionBehavior::ebStrict:
7051 // These must not be moved across calls or instructions that may change
7052 // floating-point exception masks or read floating-point exception flags.
7053 // In addition, they cannot be optimized out even if unused.
7054 PendingConstrainedFPStrict.push_back(OutChain);
7055 break;
7056 }
7057 };
7058
7059 SDVTList VTs = DAG.getVTList(ValueVTs);
7060 fp::ExceptionBehavior EB = FPI.getExceptionBehavior().getValue();
7061
7062 SDNodeFlags Flags;
7063 if (EB == fp::ExceptionBehavior::ebIgnore)
7064 Flags.setNoFPExcept(true);
7065
7066 if (auto *FPOp = dyn_cast<FPMathOperator>(&FPI))
7067 Flags.copyFMF(*FPOp);
7068
7069 unsigned Opcode;
7070 switch (FPI.getIntrinsicID()) {
7071 default: llvm_unreachable("Impossible intrinsic"); // Can't reach here.
7072#define DAG_INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN) \
7073 case Intrinsic::INTRINSIC: \
7074 Opcode = ISD::STRICT_##DAGN; \
7075 break;
7076#include "llvm/IR/ConstrainedOps.def"
7077 case Intrinsic::experimental_constrained_fmuladd: {
7078 Opcode = ISD::STRICT_FMA;
7079 // Break fmuladd into fmul and fadd.
7080 if (TM.Options.AllowFPOpFusion == FPOpFusion::Strict ||
7081 !TLI.isFMAFasterThanFMulAndFAdd(DAG.getMachineFunction(),
7082 ValueVTs[0])) {
7083 Opers.pop_back();
7084 SDValue Mul = DAG.getNode(ISD::STRICT_FMUL, sdl, VTs, Opers, Flags);
7085 pushOutChain(Mul, EB);
7086 Opcode = ISD::STRICT_FADD;
7087 Opers.clear();
7088 Opers.push_back(Mul.getValue(1));
7089 Opers.push_back(Mul.getValue(0));
7090 Opers.push_back(getValue(FPI.getArgOperand(2)));
7091 }
7092 break;
7093 }
7094 }
7095
7096 // A few strict DAG nodes carry additional operands that are not
7097 // set up by the default code above.
7098 switch (Opcode) {
7099 default: break;
7100 case ISD::STRICT_FP_ROUND:
7101 Opers.push_back(
7102 DAG.getTargetConstant(0, sdl, TLI.getPointerTy(DAG.getDataLayout())));
7103 break;
7104 case ISD::STRICT_FSETCC:
7105 case ISD::STRICT_FSETCCS: {
7106 auto *FPCmp = dyn_cast<ConstrainedFPCmpIntrinsic>(&FPI);
7107 Opers.push_back(DAG.getCondCode(getFCmpCondCode(FPCmp->getPredicate())));
7108 break;
7109 }
7110 }
7111
7112 SDValue Result = DAG.getNode(Opcode, sdl, VTs, Opers, Flags);
7113 pushOutChain(Result, EB);
7114
7115 SDValue FPResult = Result.getValue(0);
7116 setValue(&FPI, FPResult);
7117}
7118
7119static unsigned getISDForVPIntrinsic(const VPIntrinsic &VPIntrin) {
7120 Optional<unsigned> ResOPC;
7121 switch (VPIntrin.getIntrinsicID()) {
7122#define BEGIN_REGISTER_VP_INTRINSIC(INTRIN, ...) case Intrinsic::INTRIN:
7123#define BEGIN_REGISTER_VP_SDNODE(VPSDID, ...) ResOPC = ISD::VPSDID;
7124#define END_REGISTER_VP_INTRINSIC(...) break;
7125#include "llvm/IR/VPIntrinsics.def"
7126 }
7127
7128 if (!ResOPC.hasValue())
7129 llvm_unreachable(
7130 "Inconsistency: no SDNode available for this VPIntrinsic!");
7131
7132 return ResOPC.getValue();
7133}
7134
7135void SelectionDAGBuilder::visitVectorPredicationIntrinsic(
7136 const VPIntrinsic &VPIntrin) {
7137 unsigned Opcode = getISDForVPIntrinsic(VPIntrin);
7138
7139 SmallVector<EVT, 4> ValueVTs;
7140 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
7141 ComputeValueVTs(TLI, DAG.getDataLayout(), VPIntrin.getType(), ValueVTs);
7142 SDVTList VTs = DAG.getVTList(ValueVTs);
7143
7144 // Request operands.
7145 SmallVector<SDValue, 7> OpValues;
7146 for (int i = 0; i < (int)VPIntrin.getNumArgOperands(); ++i)
7147 OpValues.push_back(getValue(VPIntrin.getArgOperand(i)));
7148
7149 SDLoc DL = getCurSDLoc();
7150 SDValue Result = DAG.getNode(Opcode, DL, VTs, OpValues);
7151 setValue(&VPIntrin, Result);
7152}
7153
7154std::pair<SDValue, SDValue>
7155SelectionDAGBuilder::lowerInvokable(TargetLowering::CallLoweringInfo &CLI,
7156 const BasicBlock *EHPadBB) {
7157 MachineFunction &MF = DAG.getMachineFunction();
7158 MachineModuleInfo &MMI = MF.getMMI();
7159 MCSymbol *BeginLabel = nullptr;
7160
7161 if (EHPadBB) {
7162 // Insert a label before the invoke call to mark the try range. This can be
7163 // used to detect deletion of the invoke via the MachineModuleInfo.
7164 BeginLabel = MMI.getContext().createTempSymbol();
7165
7166 // For SjLj, keep track of which landing pads go with which invokes
7167 // so as to maintain the ordering of pads in the LSDA.
7168 unsigned CallSiteIndex = MMI.getCurrentCallSite();
7169 if (CallSiteIndex) {
7170 MF.setCallSiteBeginLabel(BeginLabel, CallSiteIndex);
7171 LPadToCallSiteMap[FuncInfo.MBBMap[EHPadBB]].push_back(CallSiteIndex);
7172
7173 // Now that the call site is handled, stop tracking it.
7174 MMI.setCurrentCallSite(0);
7175 }
7176
7177 // Both PendingLoads and PendingExports must be flushed here;
7178 // this call might not return.
7179 (void)getRoot();
7180 DAG.setRoot(DAG.getEHLabel(getCurSDLoc(), getControlRoot(), BeginLabel));
7181
7182 CLI.setChain(getRoot());
7183 }
7184 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
7185 std::pair<SDValue, SDValue> Result = TLI.LowerCallTo(CLI);
7186
7187 assert((CLI.IsTailCall || Result.second.getNode()) &&
7188 "Non-null chain expected with non-tail call!");
7189 assert((Result.second.getNode() || !Result.first.getNode()) &&
7190 "Null value expected with tail call!");
7191
7192 if (!Result.second.getNode()) {
7193 // As a special case, a null chain means that a tail call has been emitted
7194 // and the DAG root is already updated.
7195 HasTailCall = true;
7196
7197 // Since there's no actual continuation from this block, nothing can be
7198 // relying on us setting vregs for them.
7199 PendingExports.clear();
7200 } else {
7201 DAG.setRoot(Result.second);
7202 }
7203
7204 if (EHPadBB) {
7205 // Insert a label at the end of the invoke call to mark the try range. This
7206 // can be used to detect deletion of the invoke via the MachineModuleInfo.
7207 MCSymbol *EndLabel = MMI.getContext().createTempSymbol();
7208 DAG.setRoot(DAG.getEHLabel(getCurSDLoc(), getRoot(), EndLabel));
7209
7210 // Inform MachineModuleInfo of range.
7211 auto Pers = classifyEHPersonality(FuncInfo.Fn->getPersonalityFn());
7212 // There is a platform (e.g. wasm) that uses funclet style IR but does not
7213 // actually use outlined funclets and their LSDA info style.
7214 if (MF.hasEHFunclets() && isFuncletEHPersonality(Pers)) {
7215 assert(CLI.CB);
7216 WinEHFuncInfo *EHInfo = DAG.getMachineFunction().getWinEHFuncInfo();
7217 EHInfo->addIPToStateRange(cast<InvokeInst>(CLI.CB), BeginLabel, EndLabel);
7218 } else if (!isScopedEHPersonality(Pers)) {
7219 MF.addInvoke(FuncInfo.MBBMap[EHPadBB], BeginLabel, EndLabel);
7220 }
7221 }
7222
7223 return Result;
7224}
7225
7226void SelectionDAGBuilder::LowerCallTo(const CallBase &CB, SDValue Callee,
7227 bool isTailCall,
7228 const BasicBlock *EHPadBB) {
7229 auto &DL = DAG.getDataLayout();
7230 FunctionType *FTy = CB.getFunctionType();
7231 Type *RetTy = CB.getType();
7232
7233 TargetLowering::ArgListTy Args;
7234 Args.reserve(CB.arg_size());
7235
7236 const Value *SwiftErrorVal = nullptr;
7237 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
7238
7239 if (isTailCall) {
7240 // Avoid emitting tail calls in functions with the disable-tail-calls
7241 // attribute.
7242 auto *Caller = CB.getParent()->getParent();
7243 if (Caller->getFnAttribute("disable-tail-calls").getValueAsString() ==
7244 "true")
7245 isTailCall = false;
7246
7247 // We can't tail call inside a function with a swifterror argument. Lowering
7248 // does not support this yet. It would have to move into the swifterror
7249 // register before the call.
7250 if (TLI.supportSwiftError() &&
7251 Caller->getAttributes().hasAttrSomewhere(Attribute::SwiftError))
7252 isTailCall = false;
7253 }
7254
7255 for (auto I = CB.arg_begin(), E = CB.arg_end(); I != E; ++I) {
7256 TargetLowering::ArgListEntry Entry;
7257 const Value *V = *I;
7258
7259 // Skip empty types
7260 if (V->getType()->isEmptyTy())
7261 continue;
7262
7263 SDValue ArgNode = getValue(V);
7264 Entry.Node = ArgNode; Entry.Ty = V->getType();
7265
7266 Entry.setAttributes(&CB, I - CB.arg_begin());
7267
7268 // Use swifterror virtual register as input to the call.
7269 if (Entry.IsSwiftError && TLI.supportSwiftError()) {
7270 SwiftErrorVal = V;
7271 // We find the virtual register for the actual swifterror argument.
7272 // Instead of using the Value, we use the virtual register instead.
7273 Entry.Node =
7274 DAG.getRegister(SwiftError.getOrCreateVRegUseAt(&CB, FuncInfo.MBB, V),
7275 EVT(TLI.getPointerTy(DL)));
7276 }
7277
7278 Args.push_back(Entry);
7279
7280 // If we have an explicit sret argument that is an Instruction, (i.e., it
7281 // might point to function-local memory), we can't meaningfully tail-call.
7282 if (Entry.IsSRet && isa<Instruction>(V))
7283 isTailCall = false;
7284 }
7285
7286 // If call site has a cfguardtarget operand bundle, create and add an
7287 // additional ArgListEntry.
7288 if (auto Bundle = CB.getOperandBundle(LLVMContext::OB_cfguardtarget)) {
7289 TargetLowering::ArgListEntry Entry;
7290 Value *V = Bundle->Inputs[0];
7291 SDValue ArgNode = getValue(V);
7292 Entry.Node = ArgNode;
7293 Entry.Ty = V->getType();
7294 Entry.IsCFGuardTarget = true;
7295 Args.push_back(Entry);
7296 }
7297
7298 // Check if target-independent constraints permit a tail call here.
7299 // Target-dependent constraints are checked within TLI->LowerCallTo.
7300 if (isTailCall && !isInTailCallPosition(CB, DAG.getTarget()))
7301 isTailCall = false;
7302
7303 // Disable tail calls if there is an swifterror argument. Targets have not
7304 // been updated to support tail calls.
7305 if (TLI.supportSwiftError() && SwiftErrorVal)
7306 isTailCall = false;
7307
7308 TargetLowering::CallLoweringInfo CLI(DAG);
7309 CLI.setDebugLoc(getCurSDLoc())
7310 .setChain(getRoot())
7311 .setCallee(RetTy, FTy, Callee, std::move(Args), CB)
7312 .setTailCall(isTailCall)
7313 .setConvergent(CB.isConvergent())
7314 .setIsPreallocated(
7315 CB.countOperandBundlesOfType(LLVMContext::OB_preallocated) != 0);
7316 std::pair<SDValue, SDValue> Result = lowerInvokable(CLI, EHPadBB);
7317
7318 if (Result.first.getNode()) {
7319 Result.first = lowerRangeToAssertZExt(DAG, CB, Result.first);
7320 setValue(&CB, Result.first);
7321 }
7322
7323 // The last element of CLI.InVals has the SDValue for swifterror return.
7324 // Here we copy it to a virtual register and update SwiftErrorMap for
7325 // book-keeping.
7326 if (SwiftErrorVal && TLI.supportSwiftError()) {
7327 // Get the last element of InVals.
7328 SDValue Src = CLI.InVals.back();
7329 Register VReg =
7330 SwiftError.getOrCreateVRegDefAt(&CB, FuncInfo.MBB, SwiftErrorVal);
7331 SDValue CopyNode = CLI.DAG.getCopyToReg(Result.second, CLI.DL, VReg, Src);
7332 DAG.setRoot(CopyNode);
7333 }
7334}
7335
7336static SDValue getMemCmpLoad(const Value *PtrVal, MVT LoadVT,
7337 SelectionDAGBuilder &Builder) {
7338 // Check to see if this load can be trivially constant folded, e.g. if the
7339 // input is from a string literal.
7340 if (const Constant *LoadInput = dyn_cast<Constant>(PtrVal)) {
7341 // Cast pointer to the type we really want to load.
7342 Type *LoadTy =
7343 Type::getIntNTy(PtrVal->getContext(), LoadVT.getScalarSizeInBits());
7344 if (LoadVT.isVector())
7345 LoadTy = FixedVectorType::get(LoadTy, LoadVT.getVectorNumElements());
7346
7347 LoadInput = ConstantExpr::getBitCast(const_cast<Constant *>(LoadInput),
7348 PointerType::getUnqual(LoadTy));
7349
7350 if (const Constant *LoadCst = ConstantFoldLoadFromConstPtr(
7351 const_cast<Constant *>(LoadInput), LoadTy, *Builder.DL))
7352 return Builder.getValue(LoadCst);
7353 }
7354
7355 // Otherwise, we have to emit the load. If the pointer is to unfoldable but
7356 // still constant memory, the input chain can be the entry node.
7357 SDValue Root;
7358 bool ConstantMemory = false;
7359
7360 // Do not serialize (non-volatile) loads of constant memory with anything.
7361 if (Builder.AA && Builder.AA->pointsToConstantMemory(PtrVal)) {
7362 Root = Builder.DAG.getEntryNode();
7363 ConstantMemory = true;
7364 } else {
7365 // Do not serialize non-volatile loads against each other.
7366 Root = Builder.DAG.getRoot();
7367 }
7368
7369 SDValue Ptr = Builder.getValue(PtrVal);
7370 SDValue LoadVal =
7371 Builder.DAG.getLoad(LoadVT, Builder.getCurSDLoc(), Root, Ptr,
7372 MachinePointerInfo(PtrVal), Align(1));
7373
7374 if (!ConstantMemory)
7375 Builder.PendingLoads.push_back(LoadVal.getValue(1));
7376 return LoadVal;
7377}
7378
7379/// Record the value for an instruction that produces an integer result,
7380/// converting the type where necessary.
7381void SelectionDAGBuilder::processIntegerCallValue(const Instruction &I,
7382 SDValue Value,
7383 bool IsSigned) {
7384 EVT VT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
7385 I.getType(), true);
7386 if (IsSigned)
7387 Value = DAG.getSExtOrTrunc(Value, getCurSDLoc(), VT);
7388 else
7389 Value = DAG.getZExtOrTrunc(Value, getCurSDLoc(), VT);
7390 setValue(&I, Value);
7391}
7392
7393/// See if we can lower a memcmp/bcmp call into an optimized form. If so, return
7394/// true and lower it. Otherwise return false, and it will be lowered like a
7395/// normal call.
7396/// The caller already checked that \p I calls the appropriate LibFunc with a
7397/// correct prototype.
7398bool SelectionDAGBuilder::visitMemCmpBCmpCall(const CallInst &I) {
7399 const Value *LHS = I.getArgOperand(0), *RHS = I.getArgOperand(1);
7400 const Value *Size = I.getArgOperand(2);
7401 const ConstantInt *CSize = dyn_cast<ConstantInt>(Size);
7402 if (CSize && CSize->getZExtValue() == 0) {
7403 EVT CallVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
7404 I.getType(), true);
7405 setValue(&I, DAG.getConstant(0, getCurSDLoc(), CallVT));
7406 return true;
7407 }
7408
7409 const SelectionDAGTargetInfo &TSI = DAG.getSelectionDAGInfo();
7410 std::pair<SDValue, SDValue> Res = TSI.EmitTargetCodeForMemcmp(
7411 DAG, getCurSDLoc(), DAG.getRoot(), getValue(LHS), getValue(RHS),
7412 getValue(Size), MachinePointerInfo(LHS), MachinePointerInfo(RHS));
7413 if (Res.first.getNode()) {
7414 processIntegerCallValue(I, Res.first, true);
7415 PendingLoads.push_back(Res.second);
7416 return true;
7417 }
7418
7419 // memcmp(S1,S2,2) != 0 -> (*(short*)LHS != *(short*)RHS) != 0
7420 // memcmp(S1,S2,4) != 0 -> (*(int*)LHS != *(int*)RHS) != 0
7421 if (!CSize || !isOnlyUsedInZeroEqualityComparison(&I))
7422 return false;
7423
7424 // If the target has a fast compare for the given size, it will return a
7425 // preferred load type for that size. Require that the load VT is legal and
7426 // that the target supports unaligned loads of that type. Otherwise, return
7427 // INVALID.
7428 auto hasFastLoadsAndCompare = [&](unsigned NumBits) {
7429 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
7430 MVT LVT = TLI.hasFastEqualityCompare(NumBits);
7431 if (LVT != MVT::INVALID_SIMPLE_VALUE_TYPE) {
7432 // TODO: Handle 5 byte compare as 4-byte + 1 byte.
7433 // TODO: Handle 8 byte compare on x86-32 as two 32-bit loads.
7434 // TODO: Check alignment of src and dest ptrs.
7435 unsigned DstAS = LHS->getType()->getPointerAddressSpace();
7436 unsigned SrcAS = RHS->getType()->getPointerAddressSpace();
7437 if (!TLI.isTypeLegal(LVT) ||
7438 !TLI.allowsMisalignedMemoryAccesses(LVT, SrcAS) ||
7439 !TLI.allowsMisalignedMemoryAccesses(LVT, DstAS))
7440 LVT = MVT::INVALID_SIMPLE_VALUE_TYPE;
7441 }
7442
7443 return LVT;
7444 };
7445
7446 // This turns into unaligned loads. We only do this if the target natively
7447 // supports the MVT we'll be loading or if it is small enough (<= 4) that
7448 // we'll only produce a small number of byte loads.
7449 MVT LoadVT;
7450 unsigned NumBitsToCompare = CSize->getZExtValue() * 8;
7451 switch (NumBitsToCompare) {
7452 default:
7453 return false;
7454 case 16:
7455 LoadVT = MVT::i16;
7456 break;
7457 case 32:
7458 LoadVT = MVT::i32;
7459 break;
7460 case 64:
7461 case 128:
7462 case 256:
7463 LoadVT = hasFastLoadsAndCompare(NumBitsToCompare);
7464 break;
7465 }
7466
7467 if (LoadVT == MVT::INVALID_SIMPLE_VALUE_TYPE)
7468 return false;
7469
7470 SDValue LoadL = getMemCmpLoad(LHS, LoadVT, *this);
7471 SDValue LoadR = getMemCmpLoad(RHS, LoadVT, *this);
7472
7473 // Bitcast to a wide integer type if the loads are vectors.
7474 if (LoadVT.isVector()) {
7475 EVT CmpVT = EVT::getIntegerVT(LHS->getContext(), LoadVT.getSizeInBits());
7476 LoadL = DAG.getBitcast(CmpVT, LoadL);
7477 LoadR = DAG.getBitcast(CmpVT, LoadR);
7478 }
7479
7480 SDValue Cmp = DAG.getSetCC(getCurSDLoc(), MVT::i1, LoadL, LoadR, ISD::SETNE);
7481 processIntegerCallValue(I, Cmp, false);
7482 return true;
7483}
7484
7485/// See if we can lower a memchr call into an optimized form. If so, return
7486/// true and lower it. Otherwise return false, and it will be lowered like a
7487/// normal call.
7488/// The caller already checked that \p I calls the appropriate LibFunc with a
7489/// correct prototype.
7490bool SelectionDAGBuilder::visitMemChrCall(const CallInst &I) {
7491 const Value *Src = I.getArgOperand(0);
7492 const Value *Char = I.getArgOperand(1);
7493 const Value *Length = I.getArgOperand(2);
7494
7495 const SelectionDAGTargetInfo &TSI = DAG.getSelectionDAGInfo();
7496 std::pair<SDValue, SDValue> Res =
7497 TSI.EmitTargetCodeForMemchr(DAG, getCurSDLoc(), DAG.getRoot(),
7498 getValue(Src), getValue(Char), getValue(Length),
7499 MachinePointerInfo(Src));
7500 if (Res.first.getNode()) {
7501 setValue(&I, Res.first);
7502 PendingLoads.push_back(Res.second);
7503 return true;
7504 }
7505
7506 return false;
7507}
7508
7509/// See if we can lower a mempcpy call into an optimized form. If so, return
7510/// true and lower it. Otherwise return false, and it will be lowered like a
7511/// normal call.
7512/// The caller already checked that \p I calls the appropriate LibFunc with a
7513/// correct prototype.
7514bool SelectionDAGBuilder::visitMemPCpyCall(const CallInst &I) {
7515 SDValue Dst = getValue(I.getArgOperand(0));
7516 SDValue Src = getValue(I.getArgOperand(1));
7517 SDValue Size = getValue(I.getArgOperand(2));
7518
7519 Align DstAlign = DAG.InferPtrAlign(Dst).valueOrOne();
7520 Align SrcAlign = DAG.InferPtrAlign(Src).valueOrOne();
7521 // DAG::getMemcpy needs Alignment to be defined.
7522 Align Alignment = std::min(DstAlign, SrcAlign);
7523
7524 bool isVol = false;
7525 SDLoc sdl = getCurSDLoc();
7526
7527 // In the mempcpy context we need to pass in a false value for isTailCall
7528 // because the return pointer needs to be adjusted by the size of
7529 // the copied memory.
7530 SDValue Root = isVol ? getRoot() : getMemoryRoot();
7531 SDValue MC = DAG.getMemcpy(Root, sdl, Dst, Src, Size, Alignment, isVol, false,
7532 /*isTailCall=*/false,
7533 MachinePointerInfo(I.getArgOperand(0)),
7534 MachinePointerInfo(I.getArgOperand(1)));
7535 assert(MC.getNode() != nullptr &&
7536 "** memcpy should not be lowered as TailCall in mempcpy context **");
7537 DAG.setRoot(MC);
7538
7539 // Check if Size needs to be truncated or extended.
7540 Size = DAG.getSExtOrTrunc(Size, sdl, Dst.getValueType());
7541
7542 // Adjust return pointer to point just past the last dst byte.
7543 SDValue DstPlusSize = DAG.getNode(ISD::ADD, sdl, Dst.getValueType(),
7544 Dst, Size);
7545 setValue(&I, DstPlusSize);
7546 return true;
7547}
7548
7549/// See if we can lower a strcpy call into an optimized form. If so, return
7550/// true and lower it, otherwise return false and it will be lowered like a
7551/// normal call.
7552/// The caller already checked that \p I calls the appropriate LibFunc with a
7553/// correct prototype.
7554bool SelectionDAGBuilder::visitStrCpyCall(const CallInst &I, bool isStpcpy) {
7555 const Value *Arg0 = I.getArgOperand(0), *Arg1 = I.getArgOperand(1);
7556
7557 const SelectionDAGTargetInfo &TSI = DAG.getSelectionDAGInfo();
7558 std::pair<SDValue, SDValue> Res =
7559 TSI.EmitTargetCodeForStrcpy(DAG, getCurSDLoc(), getRoot(),
7560 getValue(Arg0), getValue(Arg1),
7561 MachinePointerInfo(Arg0),
7562 MachinePointerInfo(Arg1), isStpcpy);
7563 if (Res.first.getNode()) {
7564 setValue(&I, Res.first);
7565 DAG.setRoot(Res.second);
7566 return true;
7567 }
7568
7569 return false;
7570}
7571
7572/// See if we can lower a strcmp call into an optimized form. If so, return
7573/// true and lower it, otherwise return false and it will be lowered like a
7574/// normal call.
7575/// The caller already checked that \p I calls the appropriate LibFunc with a
7576/// correct prototype.
7577bool SelectionDAGBuilder::visitStrCmpCall(const CallInst &I) {
7578 const Value *Arg0 = I.getArgOperand(0), *Arg1 = I.getArgOperand(1);
7579
7580 const SelectionDAGTargetInfo &TSI = DAG.getSelectionDAGInfo();
7581 std::pair<SDValue, SDValue> Res =
7582 TSI.EmitTargetCodeForStrcmp(DAG, getCurSDLoc(), DAG.getRoot(),
7583 getValue(Arg0), getValue(Arg1),
7584 MachinePointerInfo(Arg0),
7585 MachinePointerInfo(Arg1));
7586 if (Res.first.getNode()) {
7587 processIntegerCallValue(I, Res.first, true);
7588 PendingLoads.push_back(Res.second);
7589 return true;
7590 }
7591
7592 return false;
7593}
7594
7595/// See if we can lower a strlen call into an optimized form. If so, return
7596/// true and lower it, otherwise return false and it will be lowered like a
7597/// normal call.
7598/// The caller already checked that \p I calls the appropriate LibFunc with a
7599/// correct prototype.
7600bool SelectionDAGBuilder::visitStrLenCall(const CallInst &I) {
7601 const Value *Arg0 = I.getArgOperand(0);
7602
7603 const SelectionDAGTargetInfo &TSI = DAG.getSelectionDAGInfo();
7604 std::pair<SDValue, SDValue> Res =
7605 TSI.EmitTargetCodeForStrlen(DAG, getCurSDLoc(), DAG.getRoot(),
7606 getValue(Arg0), MachinePointerInfo(Arg0));
7607 if (Res.first.getNode()) {
7608 processIntegerCallValue(I, Res.first, false);
7609 PendingLoads.push_back(Res.second);
7610 return true;
7611 }
7612
7613 return false;
7614}
7615
7616/// See if we can lower a strnlen call into an optimized form. If so, return
7617/// true and lower it, otherwise return false and it will be lowered like a
7618/// normal call.
7619/// The caller already checked that \p I calls the appropriate LibFunc with a
7620/// correct prototype.
7621bool SelectionDAGBuilder::visitStrNLenCall(const CallInst &I) {
7622 const Value *Arg0 = I.getArgOperand(0), *Arg1 = I.getArgOperand(1);
7623
7624 const SelectionDAGTargetInfo &TSI = DAG.getSelectionDAGInfo();
7625 std::pair<SDValue, SDValue> Res =
7626 TSI.EmitTargetCodeForStrnlen(DAG, getCurSDLoc(), DAG.getRoot(),
7627 getValue(Arg0), getValue(Arg1),
7628 MachinePointerInfo(Arg0));
7629 if (Res.first.getNode()) {
7630 processIntegerCallValue(I, Res.first, false);
7631 PendingLoads.push_back(Res.second);
7632 return true;
7633 }
7634
7635 return false;
7636}
7637
7638/// See if we can lower a unary floating-point operation into an SDNode with
7639/// the specified Opcode. If so, return true and lower it, otherwise return
7640/// false and it will be lowered like a normal call.
7641/// The caller already checked that \p I calls the appropriate LibFunc with a
7642/// correct prototype.
7643bool SelectionDAGBuilder::visitUnaryFloatCall(const CallInst &I,
7644 unsigned Opcode) {
7645 // We already checked this call's prototype; verify it doesn't modify errno.
7646 if (!I.onlyReadsMemory())
7647 return false;
7648
7649 SDNodeFlags Flags;
7650 Flags.copyFMF(cast<FPMathOperator>(I));
7651
7652 SDValue Tmp = getValue(I.getArgOperand(0));
7653 setValue(&I,
7654 DAG.getNode(Opcode, getCurSDLoc(), Tmp.getValueType(), Tmp, Flags));
7655 return true;
7656}
7657
7658/// See if we can lower a binary floating-point operation into an SDNode with
7659/// the specified Opcode. If so, return true and lower it. Otherwise return
7660/// false, and it will be lowered like a normal call.
7661/// The caller already checked that \p I calls the appropriate LibFunc with a
7662/// correct prototype.
7663bool SelectionDAGBuilder::visitBinaryFloatCall(const CallInst &I,
7664 unsigned Opcode) {
7665 // We already checked this call's prototype; verify it doesn't modify errno.
7666 if (!I.onlyReadsMemory())
7667 return false;
7668
7669 SDNodeFlags Flags;
7670 Flags.copyFMF(cast<FPMathOperator>(I));
7671
7672 SDValue Tmp0 = getValue(I.getArgOperand(0));
7673 SDValue Tmp1 = getValue(I.getArgOperand(1));
7674 EVT VT = Tmp0.getValueType();
7675 setValue(&I, DAG.getNode(Opcode, getCurSDLoc(), VT, Tmp0, Tmp1, Flags));
7676 return true;
7677}
7678
7679void SelectionDAGBuilder::visitCall(const CallInst &I) {
7680 // Handle inline assembly differently.
7681 if (I.isInlineAsm()) {
7682 visitInlineAsm(I);
7683 return;
7684 }
7685
7686 if (Function *F = I.getCalledFunction()) {
7687 if (F->isDeclaration()) {
7688 // Is this an LLVM intrinsic or a target-specific intrinsic?
7689 unsigned IID = F->getIntrinsicID();
7690 if (!IID)
7691 if (const TargetIntrinsicInfo *II = TM.getIntrinsicInfo())
7692 IID = II->getIntrinsicID(F);
7693
7694 if (IID) {
7695 visitIntrinsicCall(I, IID);
7696 return;
7697 }
7698 }
7699
7700 // Check for well-known libc/libm calls. If the function is internal, it
7701 // can't be a library call. Don't do the check if marked as nobuiltin for
7702 // some reason or the call site requires strict floating point semantics.
7703 LibFunc Func;
7704 if (!I.isNoBuiltin() && !I.isStrictFP() && !F->hasLocalLinkage() &&
7705 F->hasName() && LibInfo->getLibFunc(*F, Func) &&
7706 LibInfo->hasOptimizedCodeGen(Func)) {
7707 switch (Func) {
7708 default: break;
7709 case LibFunc_bcmp:
7710 if (visitMemCmpBCmpCall(I))
7711 return;
7712 break;
7713 case LibFunc_copysign:
7714 case LibFunc_copysignf:
7715 case LibFunc_copysignl:
7716 // We already checked this call's prototype; verify it doesn't modify
7717 // errno.
7718 if (I.onlyReadsMemory()) {
7719 SDValue LHS = getValue(I.getArgOperand(0));
7720 SDValue RHS = getValue(I.getArgOperand(1));
7721 setValue(&I, DAG.getNode(ISD::FCOPYSIGN, getCurSDLoc(),
7722 LHS.getValueType(), LHS, RHS));
7723 return;
7724 }
7725 break;
7726 case LibFunc_fabs:
7727 case LibFunc_fabsf:
7728 case LibFunc_fabsl:
7729 if (visitUnaryFloatCall(I, ISD::FABS))
7730 return;
7731 break;
7732 case LibFunc_fmin:
7733 case LibFunc_fminf:
7734 case LibFunc_fminl:
7735 if (visitBinaryFloatCall(I, ISD::FMINNUM))
7736 return;
7737 break;
7738 case LibFunc_fmax:
7739 case LibFunc_fmaxf:
7740 case LibFunc_fmaxl:
7741 if (visitBinaryFloatCall(I, ISD::FMAXNUM))
7742 return;
7743 break;
7744 case LibFunc_sin:
7745 case LibFunc_sinf:
7746 case LibFunc_sinl:
7747 if (visitUnaryFloatCall(I, ISD::FSIN))
7748 return;
7749 break;
7750 case LibFunc_cos:
7751 case LibFunc_cosf:
7752 case LibFunc_cosl:
7753 if (visitUnaryFloatCall(I, ISD::FCOS))
7754 return;
7755 break;
7756 case LibFunc_sqrt:
7757 case LibFunc_sqrtf:
7758 case LibFunc_sqrtl:
7759 case LibFunc_sqrt_finite:
7760 case LibFunc_sqrtf_finite:
7761 case LibFunc_sqrtl_finite:
7762 if (visitUnaryFloatCall(I, ISD::FSQRT))
7763 return;
7764 break;
7765 case LibFunc_floor:
7766 case LibFunc_floorf:
7767 case LibFunc_floorl:
7768 if (visitUnaryFloatCall(I, ISD::FFLOOR))
7769 return;
7770 break;
7771 case LibFunc_nearbyint:
7772 case LibFunc_nearbyintf:
7773 case LibFunc_nearbyintl:
7774 if (visitUnaryFloatCall(I, ISD::FNEARBYINT))
7775 return;
7776 break;
7777 case LibFunc_ceil:
7778 case LibFunc_ceilf:
7779 case LibFunc_ceill:
7780 if (visitUnaryFloatCall(I, ISD::FCEIL))
7781 return;
7782 break;
7783 case LibFunc_rint:
7784 case LibFunc_rintf:
7785 case LibFunc_rintl:
7786 if (visitUnaryFloatCall(I, ISD::FRINT))
7787 return;
7788 break;
7789 case LibFunc_round:
7790 case LibFunc_roundf:
7791 case LibFunc_roundl:
7792 if (visitUnaryFloatCall(I, ISD::FROUND))
7793 return;
7794 break;
7795 case LibFunc_trunc:
7796 case LibFunc_truncf:
7797 case LibFunc_truncl:
7798 if (visitUnaryFloatCall(I, ISD::FTRUNC))
7799 return;
7800 break;
7801 case LibFunc_log2:
7802 case LibFunc_log2f:
7803 case LibFunc_log2l:
7804 if (visitUnaryFloatCall(I, ISD::FLOG2))
7805 return;
7806 break;
7807 case LibFunc_exp2:
7808 case LibFunc_exp2f:
7809 case LibFunc_exp2l:
7810 if (visitUnaryFloatCall(I, ISD::FEXP2))
7811 return;
7812 break;
7813 case LibFunc_memcmp:
7814 if (visitMemCmpBCmpCall(I))
7815 return;
7816 break;
7817 case LibFunc_mempcpy:
7818 if (visitMemPCpyCall(I))
7819 return;
7820 break;
7821 case LibFunc_memchr:
7822 if (visitMemChrCall(I))
7823 return;
7824 break;
7825 case LibFunc_strcpy:
7826 if (visitStrCpyCall(I, false))
7827 return;
7828 break;
7829 case LibFunc_stpcpy:
7830 if (visitStrCpyCall(I, true))
7831 return;
7832 break;
7833 case LibFunc_strcmp:
7834 if (visitStrCmpCall(I))
7835 return;
7836 break;
7837 case LibFunc_strlen:
7838 if (visitStrLenCall(I))
7839 return;
7840 break;
7841 case LibFunc_strnlen:
7842 if (visitStrNLenCall(I))
7843 return;
7844 break;
7845 }
7846 }
7847 }
7848
7849 // Deopt bundles are lowered in LowerCallSiteWithDeoptBundle, and we don't
7850 // have to do anything here to lower funclet bundles.
7851 // CFGuardTarget bundles are lowered in LowerCallTo.
7852 assert(!I.hasOperandBundlesOtherThan(
7853 {LLVMContext::OB_deopt, LLVMContext::OB_funclet,
7854 LLVMContext::OB_cfguardtarget, LLVMContext::OB_preallocated}) &&
7855 "Cannot lower calls with arbitrary operand bundles!");
7856
7857 SDValue Callee = getValue(I.getCalledOperand());
7858
7859 if (I.countOperandBundlesOfType(LLVMContext::OB_deopt))
7860 LowerCallSiteWithDeoptBundle(&I, Callee, nullptr);
7861 else
7862 // Check if we can potentially perform a tail call. More detailed checking
7863 // is be done within LowerCallTo, after more information about the call is
7864 // known.
7865 LowerCallTo(I, Callee, I.isTailCall());
7866}
7867
7868namespace {
7869
7870/// AsmOperandInfo - This contains information for each constraint that we are
7871/// lowering.
7872class SDISelAsmOperandInfo : public TargetLowering::AsmOperandInfo {
7873public:
7874 /// CallOperand - If this is the result output operand or a clobber
7875 /// this is null, otherwise it is the incoming operand to the CallInst.
7876 /// This gets modified as the asm is processed.
7877 SDValue CallOperand;
7878
7879 /// AssignedRegs - If this is a register or register class operand, this
7880 /// contains the set of register corresponding to the operand.
7881 RegsForValue AssignedRegs;
7882
7883 explicit SDISelAsmOperandInfo(const TargetLowering::AsmOperandInfo &info)
7884 : TargetLowering::AsmOperandInfo(info), CallOperand(nullptr, 0) {
7885 }
7886
7887 /// Whether or not this operand accesses memory
7888 bool hasMemory(const TargetLowering &TLI) const {
7889 // Indirect operand accesses access memory.
7890 if (isIndirect)
7891 return true;
7892
7893 for (const auto &Code : Codes)
7894 if (TLI.getConstraintType(Code) == TargetLowering::C_Memory)
7895 return true;
7896
7897 return false;
7898 }
7899
7900 /// getCallOperandValEVT - Return the EVT of the Value* that this operand
7901 /// corresponds to. If there is no Value* for this operand, it returns
7902 /// MVT::Other.
7903 EVT getCallOperandValEVT(LLVMContext &Context, const TargetLowering &TLI,
7904 const DataLayout &DL) const {
7905 if (!CallOperandVal) return MVT::Other;
7906
7907 if (isa<BasicBlock>(CallOperandVal))
7908 return TLI.getProgramPointerTy(DL);
7909
7910 llvm::Type *OpTy = CallOperandVal->getType();
7911
7912 // FIXME: code duplicated from TargetLowering::ParseConstraints().
7913 // If this is an indirect operand, the operand is a pointer to the
7914 // accessed type.
7915 if (isIndirect) {
7916 PointerType *PtrTy = dyn_cast<PointerType>(OpTy);
7917 if (!PtrTy)
7918 report_fatal_error("Indirect operand for inline asm not a pointer!");
7919 OpTy = PtrTy->getElementType();
7920 }
7921
7922 // Look for vector wrapped in a struct. e.g. { <16 x i8> }.
7923 if (StructType *STy = dyn_cast<StructType>(OpTy))
7924 if (STy->getNumElements() == 1)
7925 OpTy = STy->getElementType(0);
7926
7927 // If OpTy is not a single value, it may be a struct/union that we
7928 // can tile with integers.
7929 if (!OpTy->isSingleValueType() && OpTy->isSized()) {
7930 unsigned BitSize = DL.getTypeSizeInBits(OpTy);
7931 switch (BitSize) {
7932 default: break;
7933 case 1:
7934 case 8:
7935 case 16:
7936 case 32:
7937 case 64:
7938 case 128:
7939 OpTy = IntegerType::get(Context, BitSize);
7940 break;
7941 }
7942 }
7943
7944 return TLI.getValueType(DL, OpTy, true);
7945 }
7946};
7947
7948
7949} // end anonymous namespace
7950
7951/// Make sure that the output operand \p OpInfo and its corresponding input
7952/// operand \p MatchingOpInfo have compatible constraint types (otherwise error
7953/// out).
7954static void patchMatchingInput(const SDISelAsmOperandInfo &OpInfo,
7955 SDISelAsmOperandInfo &MatchingOpInfo,
7956 SelectionDAG &DAG) {
7957 if (OpInfo.ConstraintVT == MatchingOpInfo.ConstraintVT)
7958 return;
7959
7960 const TargetRegisterInfo *TRI = DAG.getSubtarget().getRegisterInfo();
7961 const auto &TLI = DAG.getTargetLoweringInfo();
7962
7963 std::pair<unsigned, const TargetRegisterClass *> MatchRC =
7964 TLI.getRegForInlineAsmConstraint(TRI, OpInfo.ConstraintCode,
7965 OpInfo.ConstraintVT);
7966 std::pair<unsigned, const TargetRegisterClass *> InputRC =
7967 TLI.getRegForInlineAsmConstraint(TRI, MatchingOpInfo.ConstraintCode,
7968 MatchingOpInfo.ConstraintVT);
7969 if ((OpInfo.ConstraintVT.isInteger() !=
7970 MatchingOpInfo.ConstraintVT.isInteger()) ||
7971 (MatchRC.second != InputRC.second)) {
7972 // FIXME: error out in a more elegant fashion
7973 report_fatal_error("Unsupported asm: input constraint"
7974 " with a matching output constraint of"
7975 " incompatible type!");
7976 }
7977 MatchingOpInfo.ConstraintVT = OpInfo.ConstraintVT;
7978}
7979
7980/// Get a direct memory input to behave well as an indirect operand.
7981/// This may introduce stores, hence the need for a \p Chain.
7982/// \return The (possibly updated) chain.
7983static SDValue getAddressForMemoryInput(SDValue Chain, const SDLoc &Location,
7984 SDISelAsmOperandInfo &OpInfo,
7985 SelectionDAG &DAG) {
7986 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
7987
7988 // If we don't have an indirect input, put it in the constpool if we can,
7989 // otherwise spill it to a stack slot.
7990 // TODO: This isn't quite right. We need to handle these according to
7991 // the addressing mode that the constraint wants. Also, this may take
7992 // an additional register for the computation and we don't want that
7993 // either.
7994
7995 // If the operand is a float, integer, or vector constant, spill to a
7996 // constant pool entry to get its address.
7997 const Value *OpVal = OpInfo.CallOperandVal;
7998 if (isa<ConstantFP>(OpVal) || isa<ConstantInt>(OpVal) ||
7999 isa<ConstantVector>(OpVal) || isa<ConstantDataVector>(OpVal)) {
8000 OpInfo.CallOperand = DAG.getConstantPool(
8001 cast<Constant>(OpVal), TLI.getPointerTy(DAG.getDataLayout()));
8002 return Chain;
8003 }
8004
8005 // Otherwise, create a stack slot and emit a store to it before the asm.
8006 Type *Ty = OpVal->getType();
8007 auto &DL = DAG.getDataLayout();
8008 uint64_t TySize = DL.getTypeAllocSize(Ty);
8009 MachineFunction &MF = DAG.getMachineFunction();
8010 int SSFI = MF.getFrameInfo().CreateStackObject(
8011 TySize, DL.getPrefTypeAlign(Ty), false);
8012 SDValue StackSlot = DAG.getFrameIndex(SSFI, TLI.getFrameIndexTy(DL));
8013 Chain = DAG.getTruncStore(Chain, Location, OpInfo.CallOperand, StackSlot,
8014 MachinePointerInfo::getFixedStack(MF, SSFI),
8015 TLI.getMemValueType(DL, Ty));
8016 OpInfo.CallOperand = StackSlot;
8017
8018 return Chain;
8019}
8020
8021/// GetRegistersForValue - Assign registers (virtual or physical) for the
8022/// specified operand. We prefer to assign virtual registers, to allow the
8023/// register allocator to handle the assignment process. However, if the asm
8024/// uses features that we can't model on machineinstrs, we have SDISel do the
8025/// allocation. This produces generally horrible, but correct, code.
8026///
8027/// OpInfo describes the operand
8028/// RefOpInfo describes the matching operand if any, the operand otherwise
8029static void GetRegistersForValue(SelectionDAG &DAG, const SDLoc &DL,
8030 SDISelAsmOperandInfo &OpInfo,
8031 SDISelAsmOperandInfo &RefOpInfo) {
8032 LLVMContext &Context = *DAG.getContext();
8033 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
8034
8035 MachineFunction &MF = DAG.getMachineFunction();
8036 SmallVector<unsigned, 4> Regs;
8037 const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
8038
8039 // No work to do for memory operations.
8040 if (OpInfo.ConstraintType == TargetLowering::C_Memory)
8041 return;
8042
8043 // If this is a constraint for a single physreg, or a constraint for a
8044 // register class, find it.
8045 unsigned AssignedReg;
8046 const TargetRegisterClass *RC;
8047 std::tie(AssignedReg, RC) = TLI.getRegForInlineAsmConstraint(
8048 &TRI, RefOpInfo.ConstraintCode, RefOpInfo.ConstraintVT);
8049 // RC is unset only on failure. Return immediately.
8050 if (!RC)
8051 return;
8052
8053 // Get the actual register value type. This is important, because the user
8054 // may have asked for (e.g.) the AX register in i32 type. We need to
8055 // remember that AX is actually i16 to get the right extension.
8056 const MVT RegVT = *TRI.legalclasstypes_begin(*RC);
8057
8058 if (OpInfo.ConstraintVT != MVT::Other) {
8059 // If this is an FP operand in an integer register (or visa versa), or more
8060 // generally if the operand value disagrees with the register class we plan
8061 // to stick it in, fix the operand type.
8062 //
8063 // If this is an input value, the bitcast to the new type is done now.
8064 // Bitcast for output value is done at the end of visitInlineAsm().
8065 if ((OpInfo.Type == InlineAsm::isOutput ||
8066 OpInfo.Type == InlineAsm::isInput) &&
8067 !TRI.isTypeLegalForClass(*RC, OpInfo.ConstraintVT)) {
8068 // Try to convert to the first EVT that the reg class contains. If the
8069 // types are identical size, use a bitcast to convert (e.g. two differing
8070 // vector types). Note: output bitcast is done at the end of
8071 // visitInlineAsm().
8072 if (RegVT.getSizeInBits() == OpInfo.ConstraintVT.getSizeInBits()) {
8073 // Exclude indirect inputs while they are unsupported because the code
8074 // to perform the load is missing and thus OpInfo.CallOperand still
8075 // refers to the input address rather than the pointed-to value.
8076 if (OpInfo.Type == InlineAsm::isInput && !OpInfo.isIndirect)
8077 OpInfo.CallOperand =
8078 DAG.getNode(ISD::BITCAST, DL, RegVT, OpInfo.CallOperand);
8079 OpInfo.ConstraintVT = RegVT;
8080 // If the operand is an FP value and we want it in integer registers,
8081 // use the corresponding integer type. This turns an f64 value into
8082 // i64, which can be passed with two i32 values on a 32-bit machine.
8083 } else if (RegVT.isInteger() && OpInfo.ConstraintVT.isFloatingPoint()) {
8084 MVT VT = MVT::getIntegerVT(OpInfo.ConstraintVT.getSizeInBits());
8085 if (OpInfo.Type == InlineAsm::isInput)
8086 OpInfo.CallOperand =
8087 DAG.getNode(ISD::BITCAST, DL, VT, OpInfo.CallOperand);
8088 OpInfo.ConstraintVT = VT;
8089 }
8090 }
8091 }
8092
8093 // No need to allocate a matching input constraint since the constraint it's
8094 // matching to has already been allocated.
8095 if (OpInfo.isMatchingInputConstraint())
8096 return;
8097
8098 EVT ValueVT = OpInfo.ConstraintVT;
8099 if (OpInfo.ConstraintVT == MVT::Other)
8100 ValueVT = RegVT;
8101
8102 // Initialize NumRegs.
8103 unsigned NumRegs = 1;
8104 if (OpInfo.ConstraintVT != MVT::Other)
8105 NumRegs = TLI.getNumRegisters(Context, OpInfo.ConstraintVT);
8106
8107 // If this is a constraint for a specific physical register, like {r17},
8108 // assign it now.
8109
8110 // If this associated to a specific register, initialize iterator to correct
8111 // place. If virtual, make sure we have enough registers
8112
8113 // Initialize iterator if necessary
8114 TargetRegisterClass::iterator I = RC->begin();
8115 MachineRegisterInfo &RegInfo = MF.getRegInfo();
8116
8117 // Do not check for single registers.
8118 if (AssignedReg) {
8119 for (; *I != AssignedReg; ++I)
8120 assert(I != RC->end() && "AssignedReg should be member of RC");
8121 }
8122
8123 for (; NumRegs; --NumRegs, ++I) {
8124 assert(I != RC->end() && "Ran out of registers to allocate!");
8125 Register R = AssignedReg ? Register(*I) : RegInfo.createVirtualRegister(RC);
8126 Regs.push_back(R);
8127 }
8128
8129 OpInfo.AssignedRegs = RegsForValue(Regs, RegVT, ValueVT);
8130}
8131
8132static unsigned
8133findMatchingInlineAsmOperand(unsigned OperandNo,
8134 const std::vector<SDValue> &AsmNodeOperands) {
8135 // Scan until we find the definition we already emitted of this operand.
8136 unsigned CurOp = InlineAsm::Op_FirstOperand;
8137 for (; OperandNo; --OperandNo) {
8138 // Advance to the next operand.
8139 unsigned OpFlag =
8140 cast<ConstantSDNode>(AsmNodeOperands[CurOp])->getZExtValue();
8141 assert((InlineAsm::isRegDefKind(OpFlag) ||
8142 InlineAsm::isRegDefEarlyClobberKind(OpFlag) ||
8143 InlineAsm::isMemKind(OpFlag)) &&
8144 "Skipped past definitions?");
8145 CurOp += InlineAsm::getNumOperandRegisters(OpFlag) + 1;
8146 }
8147 return CurOp;
8148}
8149
8150namespace {
8151
8152class ExtraFlags {
8153 unsigned Flags = 0;
8154
8155public:
8156 explicit ExtraFlags(const CallBase &Call) {
8157 const InlineAsm *IA = cast<InlineAsm>(Call.getCalledOperand());
8158 if (IA->hasSideEffects())
8159 Flags |= InlineAsm::Extra_HasSideEffects;
8160 if (IA->isAlignStack())
8161 Flags |= InlineAsm::Extra_IsAlignStack;
8162 if (Call.isConvergent())
8163 Flags |= InlineAsm::Extra_IsConvergent;
8164 Flags |= IA->getDialect() * InlineAsm::Extra_AsmDialect;
8165 }
8166
8167 void update(const TargetLowering::AsmOperandInfo &OpInfo) {
8168 // Ideally, we would only check against memory constraints. However, the
8169 // meaning of an Other constraint can be target-specific and we can't easily
8170 // reason about it. Therefore, be conservative and set MayLoad/MayStore
8171 // for Other constraints as well.
8172 if (OpInfo.ConstraintType == TargetLowering::C_Memory ||
8173 OpInfo.ConstraintType == TargetLowering::C_Other) {
8174 if (OpInfo.Type == InlineAsm::isInput)
8175 Flags |= InlineAsm::Extra_MayLoad;
8176 else if (OpInfo.Type == InlineAsm::isOutput)
8177 Flags |= InlineAsm::Extra_MayStore;
8178 else if (OpInfo.Type == InlineAsm::isClobber)
8179 Flags |= (InlineAsm::Extra_MayLoad | InlineAsm::Extra_MayStore);
8180 }
8181 }
8182
8183 unsigned get() const { return Flags; }
8184};
8185
8186} // end anonymous namespace
8187
8188/// visitInlineAsm - Handle a call to an InlineAsm object.
8189void SelectionDAGBuilder::visitInlineAsm(const CallBase &Call) {
8190 const InlineAsm *IA = cast<InlineAsm>(Call.getCalledOperand());
8191
8192 /// ConstraintOperands - Information about all of the constraints.
8193 SmallVector<SDISelAsmOperandInfo, 16> ConstraintOperands;
8194
8195 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
8196 TargetLowering::AsmOperandInfoVector TargetConstraints = TLI.ParseConstraints(
8197 DAG.getDataLayout(), DAG.getSubtarget().getRegisterInfo(), Call);
8198
8199 // First Pass: Calculate HasSideEffects and ExtraFlags (AlignStack,
8200 // AsmDialect, MayLoad, MayStore).
8201 bool HasSideEffect = IA->hasSideEffects();
8202 ExtraFlags ExtraInfo(Call);
8203
8204 unsigned ArgNo = 0; // ArgNo - The argument of the CallInst.
8205 unsigned ResNo = 0; // ResNo - The result number of the next output.
8206 unsigned NumMatchingOps = 0;
8207 for (auto &T : TargetConstraints) {
8208 ConstraintOperands.push_back(SDISelAsmOperandInfo(T));
8209 SDISelAsmOperandInfo &OpInfo = ConstraintOperands.back();
8210
8211 // Compute the value type for each operand.
8212 if (OpInfo.Type == InlineAsm::isInput ||
8213 (OpInfo.Type == InlineAsm::isOutput && OpInfo.isIndirect)) {
8214 OpInfo.CallOperandVal = Call.getArgOperand(ArgNo++);
8215
8216 // Process the call argument. BasicBlocks are labels, currently appearing
8217 // only in asm's.
8218 if (isa<CallBrInst>(Call) &&
8219 ArgNo - 1 >= (cast<CallBrInst>(&Call)->getNumArgOperands() -
8220 cast<CallBrInst>(&Call)->getNumIndirectDests() -
8221 NumMatchingOps) &&
8222 (NumMatchingOps == 0 ||
8223 ArgNo - 1 < (cast<CallBrInst>(&Call)->getNumArgOperands() -
8224 NumMatchingOps))) {
8225 const auto *BA = cast<BlockAddress>(OpInfo.CallOperandVal);
8226 EVT VT = TLI.getValueType(DAG.getDataLayout(), BA->getType(), true);
8227 OpInfo.CallOperand = DAG.getTargetBlockAddress(BA, VT);
8228 } else if (const auto *BB = dyn_cast<BasicBlock>(OpInfo.CallOperandVal)) {
8229 OpInfo.CallOperand = DAG.getBasicBlock(FuncInfo.MBBMap[BB]);
8230 } else {
8231 OpInfo.CallOperand = getValue(OpInfo.CallOperandVal);
8232 }
8233
8234 EVT VT = OpInfo.getCallOperandValEVT(*DAG.getContext(), TLI,
8235 DAG.getDataLayout());
8236 OpInfo.ConstraintVT = VT.isSimple() ? VT.getSimpleVT() : MVT::Other;
8237 } else if (OpInfo.Type == InlineAsm::isOutput && !OpInfo.isIndirect) {
8238 // The return value of the call is this value. As such, there is no
8239 // corresponding argument.
8240 assert(!Call.getType()->isVoidTy() && "Bad inline asm!");
8241 if (StructType *STy = dyn_cast<StructType>(Call.getType())) {
8242 OpInfo.ConstraintVT = TLI.getSimpleValueType(
8243 DAG.getDataLayout(), STy->getElementType(ResNo));
8244 } else {
8245 assert(ResNo == 0 && "Asm only has one result!");
8246 OpInfo.ConstraintVT =
8247 TLI.getSimpleValueType(DAG.getDataLayout(), Call.getType());
8248 }
8249 ++ResNo;
8250 } else {
8251 OpInfo.ConstraintVT = MVT::Other;
8252 }
8253
8254 if (OpInfo.hasMatchingInput())
8255 ++NumMatchingOps;
8256
8257 if (!HasSideEffect)
8258 HasSideEffect = OpInfo.hasMemory(TLI);
8259
8260 // Determine if this InlineAsm MayLoad or MayStore based on the constraints.
8261 // FIXME: Could we compute this on OpInfo rather than T?
8262
8263 // Compute the constraint code and ConstraintType to use.
8264 TLI.ComputeConstraintToUse(T, SDValue());
8265
8266 if (T.ConstraintType == TargetLowering::C_Immediate &&
8267 OpInfo.CallOperand && !isa<ConstantSDNode>(OpInfo.CallOperand))
8268 // We've delayed emitting a diagnostic like the "n" constraint because
8269 // inlining could cause an integer showing up.
8270 return emitInlineAsmError(Call, "constraint '" + Twine(T.ConstraintCode) +
8271 "' expects an integer constant "
8272 "expression");
8273
8274 ExtraInfo.update(T);
8275 }
8276
8277
8278 // We won't need to flush pending loads if this asm doesn't touch
8279 // memory and is nonvolatile.
8280 SDValue Flag, Chain = (HasSideEffect) ? getRoot() : DAG.getRoot();
8281
8282 bool IsCallBr = isa<CallBrInst>(Call);
8283 if (IsCallBr) {
8284 // If this is a callbr we need to flush pending exports since inlineasm_br
8285 // is a terminator. We need to do this before nodes are glued to
8286 // the inlineasm_br node.
8287 Chain = getControlRoot();
8288 }
8289
8290 // Second pass over the constraints: compute which constraint option to use.
8291 for (SDISelAsmOperandInfo &OpInfo : ConstraintOperands) {
8292 // If this is an output operand with a matching input operand, look up the
8293 // matching input. If their types mismatch, e.g. one is an integer, the
8294 // other is floating point, or their sizes are different, flag it as an
8295 // error.
8296 if (OpInfo.hasMatchingInput()) {
8297 SDISelAsmOperandInfo &Input = ConstraintOperands[OpInfo.MatchingInput];
8298 patchMatchingInput(OpInfo, Input, DAG);
8299 }
8300
8301 // Compute the constraint code and ConstraintType to use.
8302 TLI.ComputeConstraintToUse(OpInfo, OpInfo.CallOperand, &DAG);
8303
8304 if (OpInfo.ConstraintType == TargetLowering::C_Memory &&
8305 OpInfo.Type == InlineAsm::isClobber)
8306 continue;
8307
8308 // If this is a memory input, and if the operand is not indirect, do what we
8309 // need to provide an address for the memory input.
8310 if (OpInfo.ConstraintType == TargetLowering::C_Memory &&
8311 !OpInfo.isIndirect) {
8312 assert((OpInfo.isMultipleAlternative ||
8313 (OpInfo.Type == InlineAsm::isInput)) &&
8314 "Can only indirectify direct input operands!");
8315
8316 // Memory operands really want the address of the value.
8317 Chain = getAddressForMemoryInput(Chain, getCurSDLoc(), OpInfo, DAG);
8318
8319 // There is no longer a Value* corresponding to this operand.
8320 OpInfo.CallOperandVal = nullptr;
8321
8322 // It is now an indirect operand.
8323 OpInfo.isIndirect = true;
8324 }
8325
8326 }
8327
8328 // AsmNodeOperands - The operands for the ISD::INLINEASM node.
8329 std::vector<SDValue> AsmNodeOperands;
8330 AsmNodeOperands.push_back(SDValue()); // reserve space for input chain
8331 AsmNodeOperands.push_back(DAG.getTargetExternalSymbol(
8332 IA->getAsmString().c_str(), TLI.getProgramPointerTy(DAG.getDataLayout())));
8333
8334 // If we have a !srcloc metadata node associated with it, we want to attach
8335 // this to the ultimately generated inline asm machineinstr. To do this, we
8336 // pass in the third operand as this (potentially null) inline asm MDNode.
8337 const MDNode *SrcLoc = Call.getMetadata("srcloc");
8338 AsmNodeOperands.push_back(DAG.getMDNode(SrcLoc));
8339
8340 // Remember the HasSideEffect, AlignStack, AsmDialect, MayLoad and MayStore
8341 // bits as operand 3.
8342 AsmNodeOperands.push_back(DAG.getTargetConstant(
8343 ExtraInfo.get(), getCurSDLoc(), TLI.getPointerTy(DAG.getDataLayout())));
8344
8345 // Third pass: Loop over operands to prepare DAG-level operands.. As part of
8346 // this, assign virtual and physical registers for inputs and otput.
8347 for (SDISelAsmOperandInfo &OpInfo : ConstraintOperands) {
8348 // Assign Registers.
8349 SDISelAsmOperandInfo &RefOpInfo =
8350 OpInfo.isMatchingInputConstraint()
8351 ? ConstraintOperands[OpInfo.getMatchedOperand()]
8352 : OpInfo;
8353 GetRegistersForValue(DAG, getCurSDLoc(), OpInfo, RefOpInfo);
8354
8355 auto DetectWriteToReservedRegister = [&]() {
8356 const MachineFunction &MF = DAG.getMachineFunction();
8357 const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
8358 for (unsigned Reg : OpInfo.AssignedRegs.Regs) {
8359 if (Register::isPhysicalRegister(Reg) &&
8360 TRI.isInlineAsmReadOnlyReg(MF, Reg)) {
8361 const char *RegName = TRI.getName(Reg);
8362 emitInlineAsmError(Call, "write to reserved register '" +
8363 Twine(RegName) + "'");
8364 return true;
8365 }
8366 }
8367 return false;
8368 };
8369
8370 switch (OpInfo.Type) {
8371 case InlineAsm::isOutput:
8372 if (OpInfo.ConstraintType == TargetLowering::C_Memory) {
8373 unsigned ConstraintID =
8374 TLI.getInlineAsmMemConstraint(OpInfo.ConstraintCode);
8375 assert(ConstraintID != InlineAsm::Constraint_Unknown &&
8376 "Failed to convert memory constraint code to constraint id.");
8377
8378 // Add information to the INLINEASM node to know about this output.
8379 unsigned OpFlags = InlineAsm::getFlagWord(InlineAsm::Kind_Mem, 1);
8380 OpFlags = InlineAsm::getFlagWordForMem(OpFlags, ConstraintID);
8381 AsmNodeOperands.push_back(DAG.getTargetConstant(OpFlags, getCurSDLoc(),
8382 MVT::i32));
8383 AsmNodeOperands.push_back(OpInfo.CallOperand);
8384 } else {
8385 // Otherwise, this outputs to a register (directly for C_Register /
8386 // C_RegisterClass, and a target-defined fashion for
8387 // C_Immediate/C_Other). Find a register that we can use.
8388 if (OpInfo.AssignedRegs.Regs.empty()) {
8389 emitInlineAsmError(
8390 Call, "couldn't allocate output register for constraint '" +
8391 Twine(OpInfo.ConstraintCode) + "'");
8392 return;
8393 }
8394
8395 if (DetectWriteToReservedRegister())
8396 return;
8397
8398 // Add information to the INLINEASM node to know that this register is
8399 // set.
8400 OpInfo.AssignedRegs.AddInlineAsmOperands(
8401 OpInfo.isEarlyClobber ? InlineAsm::Kind_RegDefEarlyClobber
8402 : InlineAsm::Kind_RegDef,
8403 false, 0, getCurSDLoc(), DAG, AsmNodeOperands);
8404 }
8405 break;
8406
8407 case InlineAsm::isInput: {
8408 SDValue InOperandVal = OpInfo.CallOperand;
8409
8410 if (OpInfo.isMatchingInputConstraint()) {
8411 // If this is required to match an output register we have already set,
8412 // just use its register.
8413 auto CurOp = findMatchingInlineAsmOperand(OpInfo.getMatchedOperand(),
8414 AsmNodeOperands);
8415 unsigned OpFlag =
8416 cast<ConstantSDNode>(AsmNodeOperands[CurOp])->getZExtValue();
8417 if (InlineAsm::isRegDefKind(OpFlag) ||
8418 InlineAsm::isRegDefEarlyClobberKind(OpFlag)) {
8419 // Add (OpFlag&0xffff)>>3 registers to MatchedRegs.
8420 if (OpInfo.isIndirect) {
8421 // This happens on gcc/testsuite/gcc.dg/pr8788-1.c
8422 emitInlineAsmError(Call, "inline asm not supported yet: "
8423 "don't know how to handle tied "
8424 "indirect register inputs");
8425 return;
8426 }
8427
8428 MVT RegVT = AsmNodeOperands[CurOp+1].getSimpleValueType();
8429 SmallVector<unsigned, 4> Regs;
8430
8431 if (const TargetRegisterClass *RC = TLI.getRegClassFor(RegVT)) {
8432 unsigned NumRegs = InlineAsm::getNumOperandRegisters(OpFlag);
8433 MachineRegisterInfo &RegInfo =
8434 DAG.getMachineFunction().getRegInfo();
8435 for (unsigned i = 0; i != NumRegs; ++i)
8436 Regs.push_back(RegInfo.createVirtualRegister(RC));
8437 } else {
8438 emitInlineAsmError(Call,
8439 "inline asm error: This value type register "
8440 "class is not natively supported!");
8441 return;
8442 }
8443
8444 RegsForValue MatchedRegs(Regs, RegVT, InOperandVal.getValueType());
8445
8446 SDLoc dl = getCurSDLoc();
8447 // Use the produced MatchedRegs object to
8448 MatchedRegs.getCopyToRegs(InOperandVal, DAG, dl, Chain, &Flag, &Call);
8449 MatchedRegs.AddInlineAsmOperands(InlineAsm::Kind_RegUse,
8450 true, OpInfo.getMatchedOperand(), dl,
8451 DAG, AsmNodeOperands);
8452 break;
8453 }
8454
8455 assert(InlineAsm::isMemKind(OpFlag) && "Unknown matching constraint!");
8456 assert(InlineAsm::getNumOperandRegisters(OpFlag) == 1 &&
8457 "Unexpected number of operands");
8458 // Add information to the INLINEASM node to know about this input.
8459 // See InlineAsm.h isUseOperandTiedToDef.
8460 OpFlag = InlineAsm::convertMemFlagWordToMatchingFlagWord(OpFlag);
8461 OpFlag = InlineAsm::getFlagWordForMatchingOp(OpFlag,
8462 OpInfo.getMatchedOperand());
8463 AsmNodeOperands.push_back(DAG.getTargetConstant(
8464 OpFlag, getCurSDLoc(), TLI.getPointerTy(DAG.getDataLayout())));
8465 AsmNodeOperands.push_back(AsmNodeOperands[CurOp+1]);
8466 break;
8467 }
8468
8469 // Treat indirect 'X' constraint as memory.
8470 if (OpInfo.ConstraintType == TargetLowering::C_Other &&
8471 OpInfo.isIndirect)
8472 OpInfo.ConstraintType = TargetLowering::C_Memory;
8473
8474 if (OpInfo.ConstraintType == TargetLowering::C_Immediate ||
8475 OpInfo.ConstraintType == TargetLowering::C_Other) {
8476 std::vector<SDValue> Ops;
8477 TLI.LowerAsmOperandForConstraint(InOperandVal, OpInfo.ConstraintCode,
8478 Ops, DAG);
8479 if (Ops.empty()) {
8480 if (OpInfo.ConstraintType == TargetLowering::C_Immediate)
8481 if (isa<ConstantSDNode>(InOperandVal)) {
8482 emitInlineAsmError(Call, "value out of range for constraint '" +
8483 Twine(OpInfo.ConstraintCode) + "'");
8484 return;
8485 }
8486
8487 emitInlineAsmError(Call,
8488 "invalid operand for inline asm constraint '" +
8489 Twine(OpInfo.ConstraintCode) + "'");
8490 return;
8491 }
8492
8493 // Add information to the INLINEASM node to know about this input.
8494 unsigned ResOpType =
8495 InlineAsm::getFlagWord(InlineAsm::Kind_Imm, Ops.size());
8496 AsmNodeOperands.push_back(DAG.getTargetConstant(
8497 ResOpType, getCurSDLoc(), TLI.getPointerTy(DAG.getDataLayout())));
8498 llvm::append_range(AsmNodeOperands, Ops);
8499 break;
8500 }
8501
8502 if (OpInfo.ConstraintType == TargetLowering::C_Memory) {
8503 assert(OpInfo.isIndirect && "Operand must be indirect to be a mem!");
8504 assert(InOperandVal.getValueType() ==
8505 TLI.getPointerTy(DAG.getDataLayout()) &&
8506 "Memory operands expect pointer values");
8507
8508 unsigned ConstraintID =
8509 TLI.getInlineAsmMemConstraint(OpInfo.ConstraintCode);
8510 assert(ConstraintID != InlineAsm::Constraint_Unknown &&
8511 "Failed to convert memory constraint code to constraint id.");
8512
8513 // Add information to the INLINEASM node to know about this input.
8514 unsigned ResOpType = InlineAsm::getFlagWord(InlineAsm::Kind_Mem, 1);
8515 ResOpType = InlineAsm::getFlagWordForMem(ResOpType, ConstraintID);
8516 AsmNodeOperands.push_back(DAG.getTargetConstant(ResOpType,
8517 getCurSDLoc(),
8518 MVT::i32));
8519 AsmNodeOperands.push_back(InOperandVal);
8520 break;
8521 }
8522
8523 assert((OpInfo.ConstraintType == TargetLowering::C_RegisterClass ||
8524 OpInfo.ConstraintType == TargetLowering::C_Register) &&
8525 "Unknown constraint type!");
8526
8527 // TODO: Support this.
8528 if (OpInfo.isIndirect) {
8529 emitInlineAsmError(
8530 Call, "Don't know how to handle indirect register inputs yet "
8531 "for constraint '" +
8532 Twine(OpInfo.ConstraintCode) + "'");
8533 return;
8534 }
8535
8536 // Copy the input into the appropriate registers.
8537 if (OpInfo.AssignedRegs.Regs.empty()) {
8538 emitInlineAsmError(Call,
8539 "couldn't allocate input reg for constraint '" +
8540 Twine(OpInfo.ConstraintCode) + "'");
8541 return;
8542 }
8543
8544 if (DetectWriteToReservedRegister())
8545 return;
8546
8547 SDLoc dl = getCurSDLoc();
8548
8549 OpInfo.AssignedRegs.getCopyToRegs(InOperandVal, DAG, dl, Chain, &Flag,
8550 &Call);
8551
8552 OpInfo.AssignedRegs.AddInlineAsmOperands(InlineAsm::Kind_RegUse, false, 0,
8553 dl, DAG, AsmNodeOperands);
8554 break;
8555 }
8556 case InlineAsm::isClobber:
8557 // Add the clobbered value to the operand list, so that the register
8558 // allocator is aware that the physreg got clobbered.
8559 if (!OpInfo.AssignedRegs.Regs.empty())
8560 OpInfo.AssignedRegs.AddInlineAsmOperands(InlineAsm::Kind_Clobber,
8561 false, 0, getCurSDLoc(), DAG,
8562 AsmNodeOperands);
8563 break;
8564 }
8565 }
8566
8567 // Finish up input operands. Set the input chain and add the flag last.
8568 AsmNodeOperands[InlineAsm::Op_InputChain] = Chain;
8569 if (Flag.getNode()) AsmNodeOperands.push_back(Flag);
8570
8571 unsigned ISDOpc = IsCallBr ? ISD::INLINEASM_BR : ISD::INLINEASM;
8572 Chain = DAG.getNode(ISDOpc, getCurSDLoc(),
8573 DAG.getVTList(MVT::Other, MVT::Glue), AsmNodeOperands);
8574 Flag = Chain.getValue(1);
8575
8576 // Do additional work to generate outputs.
8577
8578 SmallVector<EVT, 1> ResultVTs;
8579 SmallVector<SDValue, 1> ResultValues;
8580 SmallVector<SDValue, 8> OutChains;
8581
8582 llvm::Type *CallResultType = Call.getType();
8583 ArrayRef<Type *> ResultTypes;
8584 if (StructType *StructResult = dyn_cast<StructType>(CallResultType))
8585 ResultTypes = StructResult->elements();
8586 else if (!CallResultType->isVoidTy())
8587 ResultTypes = makeArrayRef(CallResultType);
8588
8589 auto CurResultType = ResultTypes.begin();
8590 auto handleRegAssign = [&](SDValue V) {
8591 assert(CurResultType != ResultTypes.end() && "Unexpected value");
8592 assert((*CurResultType)->isSized() && "Unexpected unsized type");
8593 EVT ResultVT = TLI.getValueType(DAG.getDataLayout(), *CurResultType);
8594 ++CurResultType;
8595 // If the type of the inline asm call site return value is different but has
8596 // same size as the type of the asm output bitcast it. One example of this
8597 // is for vectors with different width / number of elements. This can
8598 // happen for register classes that can contain multiple different value
8599 // types. The preg or vreg allocated may not have the same VT as was
8600 // expected.
8601 //
8602 // This can also happen for a return value that disagrees with the register
8603 // class it is put in, eg. a double in a general-purpose register on a
8604 // 32-bit machine.
8605 if (ResultVT != V.getValueType() &&
8606 ResultVT.getSizeInBits() == V.getValueSizeInBits())
8607 V = DAG.getNode(ISD::BITCAST, getCurSDLoc(), ResultVT, V);
8608 else if (ResultVT != V.getValueType() && ResultVT.isInteger() &&
8609 V.getValueType().isInteger()) {
8610 // If a result value was tied to an input value, the computed result
8611 // may have a wider width than the expected result. Extract the
8612 // relevant portion.
8613 V = DAG.getNode(ISD::TRUNCATE, getCurSDLoc(), ResultVT, V);
8614 }
8615 assert(ResultVT == V.getValueType() && "Asm result value mismatch!");
8616 ResultVTs.push_back(ResultVT);
8617 ResultValues.push_back(V);
8618 };
8619
8620 // Deal with output operands.
8621 for (SDISelAsmOperandInfo &OpInfo : ConstraintOperands) {
8622 if (OpInfo.Type == InlineAsm::isOutput) {
8623 SDValue Val;
8624 // Skip trivial output operands.
8625 if (OpInfo.AssignedRegs.Regs.empty())
8626 continue;
8627
8628 switch (OpInfo.ConstraintType) {
8629 case TargetLowering::C_Register:
8630 case TargetLowering::C_RegisterClass:
8631 Val = OpInfo.AssignedRegs.getCopyFromRegs(DAG, FuncInfo, getCurSDLoc(),
8632 Chain, &Flag, &Call);
8633 break;
8634 case TargetLowering::C_Immediate:
8635 case TargetLowering::C_Other:
8636 Val = TLI.LowerAsmOutputForConstraint(Chain, Flag, getCurSDLoc(),
8637 OpInfo, DAG);
8638 break;
8639 case TargetLowering::C_Memory:
8640 break; // Already handled.
8641 case TargetLowering::C_Unknown:
8642 assert(false && "Unexpected unknown constraint");
8643 }
8644
8645 // Indirect output manifest as stores. Record output chains.
8646 if (OpInfo.isIndirect) {
8647 const Value *Ptr = OpInfo.CallOperandVal;
8648 assert(Ptr && "Expected value CallOperandVal for indirect asm operand");
8649 SDValue Store = DAG.getStore(Chain, getCurSDLoc(), Val, getValue(Ptr),
8650 MachinePointerInfo(Ptr));
8651 OutChains.push_back(Store);
8652 } else {
8653 // generate CopyFromRegs to associated registers.
8654 assert(!Call.getType()->isVoidTy() && "Bad inline asm!");
8655 if (Val.getOpcode() == ISD::MERGE_VALUES) {
8656 for (const SDValue &V : Val->op_values())
8657 handleRegAssign(V);
8658 } else
8659 handleRegAssign(Val);
8660 }
8661 }
8662 }
8663
8664 // Set results.
8665 if (!ResultValues.empty()) {
8666 assert(CurResultType == ResultTypes.end() &&
8667 "Mismatch in number of ResultTypes");
8668 assert(ResultValues.size() == ResultTypes.size() &&
8669 "Mismatch in number of output operands in asm result");
8670
8671 SDValue V = DAG.getNode(ISD::MERGE_VALUES, getCurSDLoc(),
8672 DAG.getVTList(ResultVTs), ResultValues);
8673 setValue(&Call, V);
8674 }
8675
8676 // Collect store chains.
8677 if (!OutChains.empty())
8678 Chain = DAG.getNode(ISD::TokenFactor, getCurSDLoc(), MVT::Other, OutChains);
8679
8680 // Only Update Root if inline assembly has a memory effect.
8681 if (ResultValues.empty() || HasSideEffect || !OutChains.empty() || IsCallBr)
8682 DAG.setRoot(Chain);
8683}
8684
8685void SelectionDAGBuilder::emitInlineAsmError(const CallBase &Call,
8686 const Twine &Message) {
8687 LLVMContext &Ctx = *DAG.getContext();
8688 Ctx.emitError(&Call, Message);
8689
8690 // Make sure we leave the DAG in a valid state
8691 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
8692 SmallVector<EVT, 1> ValueVTs;
8693 ComputeValueVTs(TLI, DAG.getDataLayout(), Call.getType(), ValueVTs);
8694
8695 if (ValueVTs.empty())
8696 return;
8697
8698 SmallVector<SDValue, 1> Ops;
8699 for (unsigned i = 0, e = ValueVTs.size(); i != e; ++i)
8700 Ops.push_back(DAG.getUNDEF(ValueVTs[i]));
8701
8702 setValue(&Call, DAG.getMergeValues(Ops, getCurSDLoc()));
8703}
8704
8705void SelectionDAGBuilder::visitVAStart(const CallInst &I) {
8706 DAG.setRoot(DAG.getNode(ISD::VASTART, getCurSDLoc(),
8707 MVT::Other, getRoot(),
8708 getValue(I.getArgOperand(0)),
8709 DAG.getSrcValue(I.getArgOperand(0))));
8710}
8711
8712void SelectionDAGBuilder::visitVAArg(const VAArgInst &I) {
8713 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
8714 const DataLayout &DL = DAG.getDataLayout();
8715 SDValue V = DAG.getVAArg(
8716 TLI.getMemValueType(DAG.getDataLayout(), I.getType()), getCurSDLoc(),
8717 getRoot(), getValue(I.getOperand(0)), DAG.getSrcValue(I.getOperand(0)),
8718 DL.getABITypeAlign(I.getType()).value());
8719 DAG.setRoot(V.getValue(1));
8720
8721 if (I.getType()->isPointerTy())
8722 V = DAG.getPtrExtOrTrunc(
8723 V, getCurSDLoc(), TLI.getValueType(DAG.getDataLayout(), I.getType()));
8724 setValue(&I, V);
8725}
8726
8727void SelectionDAGBuilder::visitVAEnd(const CallInst &I) {
8728 DAG.setRoot(DAG.getNode(ISD::VAEND, getCurSDLoc(),
8729 MVT::Other, getRoot(),
8730 getValue(I.getArgOperand(0)),
8731 DAG.getSrcValue(I.getArgOperand(0))));
8732}
8733
8734void SelectionDAGBuilder::visitVACopy(const CallInst &I) {
8735 DAG.setRoot(DAG.getNode(ISD::VACOPY, getCurSDLoc(),
8736 MVT::Other, getRoot(),
8737 getValue(I.getArgOperand(0)),
8738 getValue(I.getArgOperand(1)),
8739 DAG.getSrcValue(I.getArgOperand(0)),
8740 DAG.getSrcValue(I.getArgOperand(1))));
8741}
8742
8743SDValue SelectionDAGBuilder::lowerRangeToAssertZExt(SelectionDAG &DAG,
8744 const Instruction &I,
8745 SDValue Op) {
8746 const MDNode *Range = I.getMetadata(LLVMContext::MD_range);
8747 if (!Range)
8748 return Op;
8749
8750 ConstantRange CR = getConstantRangeFromMetadata(*Range);
8751 if (CR.isFullSet() || CR.isEmptySet() || CR.isUpperWrapped())
8752 return Op;
8753
8754 APInt Lo = CR.getUnsignedMin();
8755 if (!Lo.isMinValue())
8756 return Op;
8757
8758 APInt Hi = CR.getUnsignedMax();
8759 unsigned Bits = std::max(Hi.getActiveBits(),
8760 static_cast<unsigned>(IntegerType::MIN_INT_BITS));
8761
8762 EVT SmallVT = EVT::getIntegerVT(*DAG.getContext(), Bits);
8763
8764 SDLoc SL = getCurSDLoc();
8765
8766 SDValue ZExt = DAG.getNode(ISD::AssertZext, SL, Op.getValueType(), Op,
8767 DAG.getValueType(SmallVT));
8768 unsigned NumVals = Op.getNode()->getNumValues();
8769 if (NumVals == 1)
8770 return ZExt;
8771
8772 SmallVector<SDValue, 4> Ops;
8773
8774 Ops.push_back(ZExt);
8775 for (unsigned I = 1; I != NumVals; ++I)
8776 Ops.push_back(Op.getValue(I));
8777
8778 return DAG.getMergeValues(Ops, SL);
8779}
8780
8781/// Populate a CallLowerinInfo (into \p CLI) based on the properties of
8782/// the call being lowered.
8783///
8784/// This is a helper for lowering intrinsics that follow a target calling
8785/// convention or require stack pointer adjustment. Only a subset of the
8786/// intrinsic's operands need to participate in the calling convention.
8787void SelectionDAGBuilder::populateCallLoweringInfo(
8788 TargetLowering::CallLoweringInfo &CLI, const CallBase *Call,
8789 unsigned ArgIdx, unsigned NumArgs, SDValue Callee, Type *ReturnTy,
8790 bool IsPatchPoint) {
8791 TargetLowering::ArgListTy Args;
8792 Args.reserve(NumArgs);
8793
8794 // Populate the argument list.
8795 // Attributes for args start at offset 1, after the return attribute.
8796 for (unsigned ArgI = ArgIdx, ArgE = ArgIdx + NumArgs;
8797 ArgI != ArgE; ++ArgI) {
8798 const Value *V = Call->getOperand(ArgI);
8799
8800 assert(!V->getType()->isEmptyTy() && "Empty type passed to intrinsic.");
8801
8802 TargetLowering::ArgListEntry Entry;
8803 Entry.Node = getValue(V);
8804 Entry.Ty = V->getType();
8805 Entry.setAttributes(Call, ArgI);
8806 Args.push_back(Entry);
8807 }
8808
8809 CLI.setDebugLoc(getCurSDLoc())
8810 .setChain(getRoot())
8811 .setCallee(Call->getCallingConv(), ReturnTy, Callee, std::move(Args))
8812 .setDiscardResult(Call->use_empty())
8813 .setIsPatchPoint(IsPatchPoint)
8814 .setIsPreallocated(
8815 Call->countOperandBundlesOfType(LLVMContext::OB_preallocated) != 0);
8816}
8817
8818/// Add a stack map intrinsic call's live variable operands to a stackmap
8819/// or patchpoint target node's operand list.
8820///
8821/// Constants are converted to TargetConstants purely as an optimization to
8822/// avoid constant materialization and register allocation.
8823///
8824/// FrameIndex operands are converted to TargetFrameIndex so that ISEL does not
8825/// generate addess computation nodes, and so FinalizeISel can convert the
8826/// TargetFrameIndex into a DirectMemRefOp StackMap location. This avoids
8827/// address materialization and register allocation, but may also be required
8828/// for correctness. If a StackMap (or PatchPoint) intrinsic directly uses an
8829/// alloca in the entry block, then the runtime may assume that the alloca's
8830/// StackMap location can be read immediately after compilation and that the
8831/// location is valid at any point during execution (this is similar to the
8832/// assumption made by the llvm.gcroot intrinsic). If the alloca's location were
8833/// only available in a register, then the runtime would need to trap when
8834/// execution reaches the StackMap in order to read the alloca's location.
8835static void addStackMapLiveVars(const CallBase &Call, unsigned StartIdx,
8836 const SDLoc &DL, SmallVectorImpl<SDValue> &Ops,
8837 SelectionDAGBuilder &Builder) {
8838 for (unsigned i = StartIdx, e = Call.arg_size(); i != e; ++i) {
8839 SDValue OpVal = Builder.getValue(Call.getArgOperand(i));
8840 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(OpVal)) {
8841 Ops.push_back(
8842 Builder.DAG.getTargetConstant(StackMaps::ConstantOp, DL, MVT::i64));
8843 Ops.push_back(
8844 Builder.DAG.getTargetConstant(C->getSExtValue(), DL, MVT::i64));
8845 } else if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(OpVal)) {
8846 const TargetLowering &TLI = Builder.DAG.getTargetLoweringInfo();
8847 Ops.push_back(Builder.DAG.getTargetFrameIndex(
8848 FI->getIndex(), TLI.getFrameIndexTy(Builder.DAG.getDataLayout())));
8849 } else
8850 Ops.push_back(OpVal);
8851 }
8852}
8853
8854/// Lower llvm.experimental.stackmap directly to its target opcode.
8855void SelectionDAGBuilder::visitStackmap(const CallInst &CI) {
8856 // void @llvm.experimental.stackmap(i32 <id>, i32 <numShadowBytes>,
8857 // [live variables...])
8858
8859 assert(CI.getType()->isVoidTy() && "Stackmap cannot return a value.");
8860
8861 SDValue Chain, InFlag, Callee, NullPtr;
8862 SmallVector<SDValue, 32> Ops;
8863
8864 SDLoc DL = getCurSDLoc();
8865 Callee = getValue(CI.getCalledOperand());
8866 NullPtr = DAG.getIntPtrConstant(0, DL, true);
8867
8868 // The stackmap intrinsic only records the live variables (the arguments
8869 // passed to it) and emits NOPS (if requested). Unlike the patchpoint
8870 // intrinsic, this won't be lowered to a function call. This means we don't
8871 // have to worry about calling conventions and target specific lowering code.
8872 // Instead we perform the call lowering right here.
8873 //
8874 // chain, flag = CALLSEQ_START(chain, 0, 0)
8875 // chain, flag = STACKMAP(id, nbytes, ..., chain, flag)
8876 // chain, flag = CALLSEQ_END(chain, 0, 0, flag)
8877 //
8878 Chain = DAG.getCALLSEQ_START(getRoot(), 0, 0, DL);
8879 InFlag = Chain.getValue(1);
8880
8881 // Add the <id> and <numBytes> constants.
8882 SDValue IDVal = getValue(CI.getOperand(PatchPointOpers::IDPos));
8883 Ops.push_back(DAG.getTargetConstant(
8884 cast<ConstantSDNode>(IDVal)->getZExtValue(), DL, MVT::i64));
8885 SDValue NBytesVal = getValue(CI.getOperand(PatchPointOpers::NBytesPos));
8886 Ops.push_back(DAG.getTargetConstant(
8887 cast<ConstantSDNode>(NBytesVal)->getZExtValue(), DL,
8888 MVT::i32));
8889
8890 // Push live variables for the stack map.
8891 addStackMapLiveVars(CI, 2, DL, Ops, *this);
8892
8893 // We are not pushing any register mask info here on the operands list,
8894 // because the stackmap doesn't clobber anything.
8895
8896 // Push the chain and the glue flag.
8897 Ops.push_back(Chain);
8898 Ops.push_back(InFlag);
8899
8900 // Create the STACKMAP node.
8901 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
8902 SDNode *SM = DAG.getMachineNode(TargetOpcode::STACKMAP, DL, NodeTys, Ops);
8903 Chain = SDValue(SM, 0);
8904 InFlag = Chain.getValue(1);
8905
8906 Chain = DAG.getCALLSEQ_END(Chain, NullPtr, NullPtr, InFlag, DL);
8907
8908 // Stackmaps don't generate values, so nothing goes into the NodeMap.
8909
8910 // Set the root to the target-lowered call chain.
8911 DAG.setRoot(Chain);
8912
8913 // Inform the Frame Information that we have a stackmap in this function.
8914 FuncInfo.MF->getFrameInfo().setHasStackMap();
8915}
8916
8917/// Lower llvm.experimental.patchpoint directly to its target opcode.
8918void SelectionDAGBuilder::visitPatchpoint(const CallBase &CB,
8919 const BasicBlock *EHPadBB) {
8920 // void|i64 @llvm.experimental.patchpoint.void|i64(i64 <id>,
8921 // i32 <numBytes>,
8922 // i8* <target>,
8923 // i32 <numArgs>,
8924 // [Args...],
8925 // [live variables...])
8926
8927 CallingConv::ID CC = CB.getCallingConv();
8928 bool IsAnyRegCC = CC == CallingConv::AnyReg;
8929 bool HasDef = !CB.getType()->isVoidTy();
8930 SDLoc dl = getCurSDLoc();
8931 SDValue Callee = getValue(CB.getArgOperand(PatchPointOpers::TargetPos));
8932
8933 // Handle immediate and symbolic callees.
8934 if (auto* ConstCallee = dyn_cast<ConstantSDNode>(Callee))
8935 Callee = DAG.getIntPtrConstant(ConstCallee->getZExtValue(), dl,
8936 /*isTarget=*/true);
8937 else if (auto* SymbolicCallee = dyn_cast<GlobalAddressSDNode>(Callee))
8938 Callee = DAG.getTargetGlobalAddress(SymbolicCallee->getGlobal(),
8939 SDLoc(SymbolicCallee),
8940 SymbolicCallee->getValueType(0));
8941
8942 // Get the real number of arguments participating in the call <numArgs>
8943 SDValue NArgVal = getValue(CB.getArgOperand(PatchPointOpers::NArgPos));
8944 unsigned NumArgs = cast<ConstantSDNode>(NArgVal)->getZExtValue();
8945
8946 // Skip the four meta args: <id>, <numNopBytes>, <target>, <numArgs>
8947 // Intrinsics include all meta-operands up to but not including CC.
8948 unsigned NumMetaOpers = PatchPointOpers::CCPos;
8949 assert(CB.arg_size() >= NumMetaOpers + NumArgs &&
8950 "Not enough arguments provided to the patchpoint intrinsic");
8951
8952 // For AnyRegCC the arguments are lowered later on manually.
8953 unsigned NumCallArgs = IsAnyRegCC ? 0 : NumArgs;
8954 Type *ReturnTy =
8955 IsAnyRegCC ? Type::getVoidTy(*DAG.getContext()) : CB.getType();
8956
8957 TargetLowering::CallLoweringInfo CLI(DAG);
8958 populateCallLoweringInfo(CLI, &CB, NumMetaOpers, NumCallArgs, Callee,
8959 ReturnTy, true);
8960 std::pair<SDValue, SDValue> Result = lowerInvokable(CLI, EHPadBB);
8961
8962 SDNode *CallEnd = Result.second.getNode();
8963 if (HasDef && (CallEnd->getOpcode() == ISD::CopyFromReg))
8964 CallEnd = CallEnd->getOperand(0).getNode();
8965
8966 /// Get a call instruction from the call sequence chain.
8967 /// Tail calls are not allowed.
8968 assert(CallEnd->getOpcode() == ISD::CALLSEQ_END &&
8969 "Expected a callseq node.");
8970 SDNode *Call = CallEnd->getOperand(0).getNode();
8971 bool HasGlue = Call->getGluedNode();
8972
8973 // Replace the target specific call node with the patchable intrinsic.
8974 SmallVector<SDValue, 8> Ops;
8975
8976 // Add the <id> and <numBytes> constants.
8977 SDValue IDVal = getValue(CB.getArgOperand(PatchPointOpers::IDPos));
8978 Ops.push_back(DAG.getTargetConstant(
8979 cast<ConstantSDNode>(IDVal)->getZExtValue(), dl, MVT::i64));
8980 SDValue NBytesVal = getValue(CB.getArgOperand(PatchPointOpers::NBytesPos));
8981 Ops.push_back(DAG.getTargetConstant(
8982 cast<ConstantSDNode>(NBytesVal)->getZExtValue(), dl,
8983 MVT::i32));
8984
8985 // Add the callee.
8986 Ops.push_back(Callee);
8987
8988 // Adjust <numArgs> to account for any arguments that have been passed on the
8989 // stack instead.
8990 // Call Node: Chain, Target, {Args}, RegMask, [Glue]
8991 unsigned NumCallRegArgs = Call->getNumOperands() - (HasGlue ? 4 : 3);
8992 NumCallRegArgs = IsAnyRegCC ? NumArgs : NumCallRegArgs;
8993 Ops.push_back(DAG.getTargetConstant(NumCallRegArgs, dl, MVT::i32));
8994
8995 // Add the calling convention
8996 Ops.push_back(DAG.getTargetConstant((unsigned)CC, dl, MVT::i32));
8997
8998 // Add the arguments we omitted previously. The register allocator should
8999 // place these in any free register.
9000 if (IsAnyRegCC)
9001 for (unsigned i = NumMetaOpers, e = NumMetaOpers + NumArgs; i != e; ++i)
9002 Ops.push_back(getValue(CB.getArgOperand(i)));
9003
9004 // Push the arguments from the call instruction up to the register mask.
9005 SDNode::op_iterator e = HasGlue ? Call->op_end()-2 : Call->op_end()-1;
9006 Ops.append(Call->op_begin() + 2, e);
9007
9008 // Push live variables for the stack map.
9009 addStackMapLiveVars(CB, NumMetaOpers + NumArgs, dl, Ops, *this);
9010
9011 // Push the register mask info.
9012 if (HasGlue)
9013 Ops.push_back(*(Call->op_end()-2));
9014 else
9015 Ops.push_back(*(Call->op_end()-1));
9016
9017 // Push the chain (this is originally the first operand of the call, but
9018 // becomes now the last or second to last operand).
9019 Ops.push_back(*(Call->op_begin()));
9020
9021 // Push the glue flag (last operand).
9022 if (HasGlue)
9023 Ops.push_back(*(Call->op_end()-1));
9024
9025 SDVTList NodeTys;
9026 if (IsAnyRegCC && HasDef) {
9027 // Create the return types based on the intrinsic definition
9028 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
9029 SmallVector<EVT, 3> ValueVTs;
9030 ComputeValueVTs(TLI, DAG.getDataLayout(), CB.getType(), ValueVTs);
9031 assert(ValueVTs.size() == 1 && "Expected only one return value type.");
9032
9033 // There is always a chain and a glue type at the end
9034 ValueVTs.push_back(MVT::Other);
9035 ValueVTs.push_back(MVT::Glue);
9036 NodeTys = DAG.getVTList(ValueVTs);
9037 } else
9038 NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
9039
9040 // Replace the target specific call node with a PATCHPOINT node.
9041 MachineSDNode *MN = DAG.getMachineNode(TargetOpcode::PATCHPOINT,
9042 dl, NodeTys, Ops);
9043
9044 // Update the NodeMap.
9045 if (HasDef) {
9046 if (IsAnyRegCC)
9047 setValue(&CB, SDValue(MN, 0));
9048 else
9049 setValue(&CB, Result.first);
9050 }
9051
9052 // Fixup the consumers of the intrinsic. The chain and glue may be used in the
9053 // call sequence. Furthermore the location of the chain and glue can change
9054 // when the AnyReg calling convention is used and the intrinsic returns a
9055 // value.
9056 if (IsAnyRegCC && HasDef) {
9057 SDValue From[] = {SDValue(Call, 0), SDValue(Call, 1)};
9058 SDValue To[] = {SDValue(MN, 1), SDValue(MN, 2)};
9059 DAG.ReplaceAllUsesOfValuesWith(From, To, 2);
9060 } else
9061 DAG.ReplaceAllUsesWith(Call, MN);
9062 DAG.DeleteNode(Call);
9063
9064 // Inform the Frame Information that we have a patchpoint in this function.
9065 FuncInfo.MF->getFrameInfo().setHasPatchPoint();
9066}
9067
9068void SelectionDAGBuilder::visitVectorReduce(const CallInst &I,
9069 unsigned Intrinsic) {
9070 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
9071 SDValue Op1 = getValue(I.getArgOperand(0));
9072 SDValue Op2;
9073 if (I.getNumArgOperands() > 1)
9074 Op2 = getValue(I.getArgOperand(1));
9075 SDLoc dl = getCurSDLoc();
9076 EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
9077 SDValue Res;
9078 SDNodeFlags SDFlags;
9079 if (auto *FPMO = dyn_cast<FPMathOperator>(&I))
9080 SDFlags.copyFMF(*FPMO);
9081
9082 switch (Intrinsic) {
9083 case Intrinsic::vector_reduce_fadd:
9084 if (SDFlags.hasAllowReassociation())
9085 Res = DAG.getNode(ISD::FADD, dl, VT, Op1,
9086 DAG.getNode(ISD::VECREDUCE_FADD, dl, VT, Op2, SDFlags),
9087 SDFlags);
9088 else
9089 Res = DAG.getNode(ISD::VECREDUCE_SEQ_FADD, dl, VT, Op1, Op2, SDFlags);
9090 break;
9091 case Intrinsic::vector_reduce_fmul:
9092 if (SDFlags.hasAllowReassociation())
9093 Res = DAG.getNode(ISD::FMUL, dl, VT, Op1,
9094 DAG.getNode(ISD::VECREDUCE_FMUL, dl, VT, Op2, SDFlags),
9095 SDFlags);
9096 else
9097 Res = DAG.getNode(ISD::VECREDUCE_SEQ_FMUL, dl, VT, Op1, Op2, SDFlags);
9098 break;
9099 case Intrinsic::vector_reduce_add:
9100 Res = DAG.getNode(ISD::VECREDUCE_ADD, dl, VT, Op1);
9101 break;
9102 case Intrinsic::vector_reduce_mul:
9103 Res = DAG.getNode(ISD::VECREDUCE_MUL, dl, VT, Op1);
9104 break;
9105 case Intrinsic::vector_reduce_and:
9106 Res = DAG.getNode(ISD::VECREDUCE_AND, dl, VT, Op1);
9107 break;
9108 case Intrinsic::vector_reduce_or:
9109 Res = DAG.getNode(ISD::VECREDUCE_OR, dl, VT, Op1);
9110 break;
9111 case Intrinsic::vector_reduce_xor:
9112 Res = DAG.getNode(ISD::VECREDUCE_XOR, dl, VT, Op1);
9113 break;
9114 case Intrinsic::vector_reduce_smax:
9115 Res = DAG.getNode(ISD::VECREDUCE_SMAX, dl, VT, Op1);
9116 break;
9117 case Intrinsic::vector_reduce_smin:
9118 Res = DAG.getNode(ISD::VECREDUCE_SMIN, dl, VT, Op1);
9119 break;
9120 case Intrinsic::vector_reduce_umax:
9121 Res = DAG.getNode(ISD::VECREDUCE_UMAX, dl, VT, Op1);
9122 break;
9123 case Intrinsic::vector_reduce_umin:
9124 Res = DAG.getNode(ISD::VECREDUCE_UMIN, dl, VT, Op1);
9125 break;
9126 case Intrinsic::vector_reduce_fmax:
9127 Res = DAG.getNode(ISD::VECREDUCE_FMAX, dl, VT, Op1, SDFlags);
9128 break;
9129 case Intrinsic::vector_reduce_fmin:
9130 Res = DAG.getNode(ISD::VECREDUCE_FMIN, dl, VT, Op1, SDFlags);
9131 break;
9132 default:
9133 llvm_unreachable("Unhandled vector reduce intrinsic");
9134 }
9135 setValue(&I, Res);
9136}
9137
9138/// Returns an AttributeList representing the attributes applied to the return
9139/// value of the given call.
9140static AttributeList getReturnAttrs(TargetLowering::CallLoweringInfo &CLI) {
9141 SmallVector<Attribute::AttrKind, 2> Attrs;
9142 if (CLI.RetSExt)
9143 Attrs.push_back(Attribute::SExt);
9144 if (CLI.RetZExt)
9145 Attrs.push_back(Attribute::ZExt);
9146 if (CLI.IsInReg)
9147 Attrs.push_back(Attribute::InReg);
9148
9149 return AttributeList::get(CLI.RetTy->getContext(), AttributeList::ReturnIndex,
9150 Attrs);
9151}
9152
9153/// TargetLowering::LowerCallTo - This is the default LowerCallTo
9154/// implementation, which just calls LowerCall.
9155/// FIXME: When all targets are
9156/// migrated to using LowerCall, this hook should be integrated into SDISel.
9157std::pair<SDValue, SDValue>
9158TargetLowering::LowerCallTo(TargetLowering::CallLoweringInfo &CLI) const {
9159 // Handle the incoming return values from the call.
9160 CLI.Ins.clear();
9161 Type *OrigRetTy = CLI.RetTy;
9162 SmallVector<EVT, 4> RetTys;
9163 SmallVector<uint64_t, 4> Offsets;
9164 auto &DL = CLI.DAG.getDataLayout();
9165 ComputeValueVTs(*this, DL, CLI.RetTy, RetTys, &Offsets);
9166
9167 if (CLI.IsPostTypeLegalization) {
9168 // If we are lowering a libcall after legalization, split the return type.
9169 SmallVector<EVT, 4> OldRetTys;
9170 SmallVector<uint64_t, 4> OldOffsets;
9171 RetTys.swap(OldRetTys);
9172 Offsets.swap(OldOffsets);
9173
9174 for (size_t i = 0, e = OldRetTys.size(); i != e; ++i) {
9175 EVT RetVT = OldRetTys[i];
9176 uint64_t Offset = OldOffsets[i];
9177 MVT RegisterVT = getRegisterType(CLI.RetTy->getContext(), RetVT);
9178 unsigned NumRegs = getNumRegisters(CLI.RetTy->getContext(), RetVT);
9179 unsigned RegisterVTByteSZ = RegisterVT.getSizeInBits() / 8;
9180 RetTys.append(NumRegs, RegisterVT);
9181 for (unsigned j = 0; j != NumRegs; ++j)
9182 Offsets.push_back(Offset + j * RegisterVTByteSZ);
9183 }
9184 }
9185
9186 SmallVector<ISD::OutputArg, 4> Outs;
9187 GetReturnInfo(CLI.CallConv, CLI.RetTy, getReturnAttrs(CLI), Outs, *this, DL);
9188
9189 bool CanLowerReturn =
9190 this->CanLowerReturn(CLI.CallConv, CLI.DAG.getMachineFunction(),
9191 CLI.IsVarArg, Outs, CLI.RetTy->getContext());
9192
9193 SDValue DemoteStackSlot;
9194 int DemoteStackIdx = -100;
9195 if (!CanLowerReturn) {
9196 // FIXME: equivalent assert?
9197 // assert(!CS.hasInAllocaArgument() &&
9198 // "sret demotion is incompatible with inalloca");
9199 uint64_t TySize = DL.getTypeAllocSize(CLI.RetTy);
9200 Align Alignment = DL.getPrefTypeAlign(CLI.RetTy);
9201 MachineFunction &MF = CLI.DAG.getMachineFunction();
9202 DemoteStackIdx =
9203 MF.getFrameInfo().CreateStackObject(TySize, Alignment, false);
9204 Type *StackSlotPtrType = PointerType::get(CLI.RetTy,
9205 DL.getAllocaAddrSpace());
9206
9207 DemoteStackSlot = CLI.DAG.getFrameIndex(DemoteStackIdx, getFrameIndexTy(DL));
9208 ArgListEntry Entry;
9209 Entry.Node = DemoteStackSlot;
9210 Entry.Ty = StackSlotPtrType;
9211 Entry.IsSExt = false;
9212 Entry.IsZExt = false;
9213 Entry.IsInReg = false;
9214 Entry.IsSRet = true;
9215 Entry.IsNest = false;
9216 Entry.IsByVal = false;
9217 Entry.IsByRef = false;
9218 Entry.IsReturned = false;
9219 Entry.IsSwiftSelf = false;
9220 Entry.IsSwiftError = false;
9221 Entry.IsCFGuardTarget = false;
9222 Entry.Alignment = Alignment;
9223 CLI.getArgs().insert(CLI.getArgs().begin(), Entry);
9224 CLI.NumFixedArgs += 1;
9225 CLI.RetTy = Type::getVoidTy(CLI.RetTy->getContext());
9226
9227 // sret demotion isn't compatible with tail-calls, since the sret argument
9228 // points into the callers stack frame.
9229 CLI.IsTailCall = false;
9230 } else {
9231 bool NeedsRegBlock = functionArgumentNeedsConsecutiveRegisters(
9232 CLI.RetTy, CLI.CallConv, CLI.IsVarArg);
9233 for (unsigned I = 0, E = RetTys.size(); I != E; ++I) {
9234 ISD::ArgFlagsTy Flags;
9235 if (NeedsRegBlock) {
9236 Flags.setInConsecutiveRegs();
9237 if (I == RetTys.size() - 1)
9238 Flags.setInConsecutiveRegsLast();
9239 }
9240 EVT VT = RetTys[I];
9241 MVT RegisterVT = getRegisterTypeForCallingConv(CLI.RetTy->getContext(),
9242 CLI.CallConv, VT);
9243 unsigned NumRegs = getNumRegistersForCallingConv(CLI.RetTy->getContext(),
9244 CLI.CallConv, VT);
9245 for (unsigned i = 0; i != NumRegs; ++i) {
9246 ISD::InputArg MyFlags;
9247 MyFlags.Flags = Flags;
9248 MyFlags.VT = RegisterVT;
9249 MyFlags.ArgVT = VT;
9250 MyFlags.Used = CLI.IsReturnValueUsed;
9251 if (CLI.RetTy->isPointerTy()) {
9252 MyFlags.Flags.setPointer();
9253 MyFlags.Flags.setPointerAddrSpace(
9254 cast<PointerType>(CLI.RetTy)->getAddressSpace());
9255 }
9256 if (CLI.RetSExt)
9257 MyFlags.Flags.setSExt();
9258 if (CLI.RetZExt)
9259 MyFlags.Flags.setZExt();
9260 if (CLI.IsInReg)
9261 MyFlags.Flags.setInReg();
9262 CLI.Ins.push_back(MyFlags);
9263 }
9264 }
9265 }
9266
9267 // We push in swifterror return as the last element of CLI.Ins.
9268 ArgListTy &Args = CLI.getArgs();
9269 if (supportSwiftError()) {
9270 for (unsigned i = 0, e = Args.size(); i != e; ++i) {
9271 if (Args[i].IsSwiftError) {
9272 ISD::InputArg MyFlags;
9273 MyFlags.VT = getPointerTy(DL);
9274 MyFlags.ArgVT = EVT(getPointerTy(DL));
9275 MyFlags.Flags.setSwiftError();
9276 CLI.Ins.push_back(MyFlags);
9277 }
9278 }
9279 }
9280
9281 // Handle all of the outgoing arguments.
9282 CLI.Outs.clear();
9283 CLI.OutVals.clear();
9284 for (unsigned i = 0, e = Args.size(); i != e; ++i) {
9285 SmallVector<EVT, 4> ValueVTs;
9286 ComputeValueVTs(*this, DL, Args[i].Ty, ValueVTs);
9287 // FIXME: Split arguments if CLI.IsPostTypeLegalization
9288 Type *FinalType = Args[i].Ty;
9289 if (Args[i].IsByVal)
9290 FinalType = cast<PointerType>(Args[i].Ty)->getElementType();
9291 bool NeedsRegBlock = functionArgumentNeedsConsecutiveRegisters(
9292 FinalType, CLI.CallConv, CLI.IsVarArg);
9293 for (unsigned Value = 0, NumValues = ValueVTs.size(); Value != NumValues;
9294 ++Value) {
9295 EVT VT = ValueVTs[Value];
9296 Type *ArgTy = VT.getTypeForEVT(CLI.RetTy->getContext());
9297 SDValue Op = SDValue(Args[i].Node.getNode(),
9298 Args[i].Node.getResNo() + Value);
9299 ISD::ArgFlagsTy Flags;
9300
9301 // Certain targets (such as MIPS), may have a different ABI alignment
9302 // for a type depending on the context. Give the target a chance to
9303 // specify the alignment it wants.
9304 const Align OriginalAlignment(getABIAlignmentForCallingConv(ArgTy, DL));
9305
9306 if (Args[i].Ty->isPointerTy()) {
9307 Flags.setPointer();
9308 Flags.setPointerAddrSpace(
9309 cast<PointerType>(Args[i].Ty)->getAddressSpace());
9310 }
9311 if (Args[i].IsZExt)
9312 Flags.setZExt();
9313 if (Args[i].IsSExt)
9314 Flags.setSExt();
9315 if (Args[i].IsInReg) {
9316 // If we are using vectorcall calling convention, a structure that is
9317 // passed InReg - is surely an HVA
9318 if (CLI.CallConv == CallingConv::X86_VectorCall &&
9319 isa<StructType>(FinalType)) {
9320 // The first value of a structure is marked
9321 if (0 == Value)
9322 Flags.setHvaStart();
9323 Flags.setHva();
9324 }
9325 // Set InReg Flag
9326 Flags.setInReg();
9327 }
9328 if (Args[i].IsSRet)
9329 Flags.setSRet();
9330 if (Args[i].IsSwiftSelf)
9331 Flags.setSwiftSelf();
9332 if (Args[i].IsSwiftError)
9333 Flags.setSwiftError();
9334 if (Args[i].IsCFGuardTarget)
9335 Flags.setCFGuardTarget();
9336 if (Args[i].IsByVal)
9337 Flags.setByVal();
9338 if (Args[i].IsByRef)
9339 Flags.setByRef();
9340 if (Args[i].IsPreallocated) {
9341 Flags.setPreallocated();
9342 // Set the byval flag for CCAssignFn callbacks that don't know about
9343 // preallocated. This way we can know how many bytes we should've
9344 // allocated and how many bytes a callee cleanup function will pop. If
9345 // we port preallocated to more targets, we'll have to add custom
9346 // preallocated handling in the various CC lowering callbacks.
9347 Flags.setByVal();
9348 }
9349 if (Args[i].IsInAlloca) {
9350 Flags.setInAlloca();
9351 // Set the byval flag for CCAssignFn callbacks that don't know about
9352 // inalloca. This way we can know how many bytes we should've allocated
9353 // and how many bytes a callee cleanup function will pop. If we port
9354 // inalloca to more targets, we'll have to add custom inalloca handling
9355 // in the various CC lowering callbacks.
9356 Flags.setByVal();
9357 }
9358 if (Args[i].IsByVal || Args[i].IsInAlloca || Args[i].IsPreallocated) {
9359 PointerType *Ty = cast<PointerType>(Args[i].Ty);
9360 Type *ElementTy = Ty->getElementType();
9361
9362 unsigned FrameSize = DL.getTypeAllocSize(
9363 Args[i].ByValType ? Args[i].ByValType : ElementTy);
9364 Flags.setByValSize(FrameSize);
9365
9366 // info is not there but there are cases it cannot get right.
9367 Align FrameAlign;
9368 if (auto MA = Args[i].Alignment)
9369 FrameAlign = *MA;
9370 else
9371 FrameAlign = Align(getByValTypeAlignment(ElementTy, DL));
9372 Flags.setByValAlign(FrameAlign);
9373 }
9374 if (Args[i].IsNest)
9375 Flags.setNest();
9376 if (NeedsRegBlock)
9377 Flags.setInConsecutiveRegs();
9378 Flags.setOrigAlign(OriginalAlignment);
9379
9380 MVT PartVT = getRegisterTypeForCallingConv(CLI.RetTy->getContext(),
9381 CLI.CallConv, VT);
9382 unsigned NumParts = getNumRegistersForCallingConv(CLI.RetTy->getContext(),
9383 CLI.CallConv, VT);
9384 SmallVector<SDValue, 4> Parts(NumParts);
9385 ISD::NodeType ExtendKind = ISD::ANY_EXTEND;
9386
9387 if (Args[i].IsSExt)
9388 ExtendKind = ISD::SIGN_EXTEND;
9389 else if (Args[i].IsZExt)
9390 ExtendKind = ISD::ZERO_EXTEND;
9391
9392 // Conservatively only handle 'returned' on non-vectors that can be lowered,
9393 // for now.
9394 if (Args[i].IsReturned && !Op.getValueType().isVector() &&
9395 CanLowerReturn) {
9396 assert((CLI.RetTy == Args[i].Ty ||
9397 (CLI.RetTy->isPointerTy() && Args[i].Ty->isPointerTy() &&
9398 CLI.RetTy->getPointerAddressSpace() ==
9399 Args[i].Ty->getPointerAddressSpace())) &&
9400 RetTys.size() == NumValues && "unexpected use of 'returned'");
9401 // Before passing 'returned' to the target lowering code, ensure that
9402 // either the register MVT and the actual EVT are the same size or that
9403 // the return value and argument are extended in the same way; in these
9404 // cases it's safe to pass the argument register value unchanged as the
9405 // return register value (although it's at the target's option whether
9406 // to do so)
9407 // TODO: allow code generation to take advantage of partially preserved
9408 // registers rather than clobbering the entire register when the
9409 // parameter extension method is not compatible with the return
9410 // extension method
9411 if ((NumParts * PartVT.getSizeInBits() == VT.getSizeInBits()) ||
9412 (ExtendKind != ISD::ANY_EXTEND && CLI.RetSExt == Args[i].IsSExt &&
9413 CLI.RetZExt == Args[i].IsZExt))
9414 Flags.setReturned();
9415 }
9416
9417 getCopyToParts(CLI.DAG, CLI.DL, Op, &Parts[0], NumParts, PartVT, CLI.CB,
9418 CLI.CallConv, ExtendKind);
9419
9420 for (unsigned j = 0; j != NumParts; ++j) {
9421 // if it isn't first piece, alignment must be 1
9422 // For scalable vectors the scalable part is currently handled
9423 // by individual targets, so we just use the known minimum size here.
9424 ISD::OutputArg MyFlags(Flags, Parts[j].getValueType(), VT,
9425 i < CLI.NumFixedArgs, i,
9426 j*Parts[j].getValueType().getStoreSize().getKnownMinSize());
9427 if (NumParts > 1 && j == 0)
9428 MyFlags.Flags.setSplit();
9429 else if (j != 0) {
9430 MyFlags.Flags.setOrigAlign(Align(1));
9431 if (j == NumParts - 1)
9432 MyFlags.Flags.setSplitEnd();
9433 }
9434
9435 CLI.Outs.push_back(MyFlags);
9436 CLI.OutVals.push_back(Parts[j]);
9437 }
9438
9439 if (NeedsRegBlock && Value == NumValues - 1)
9440 CLI.Outs[CLI.Outs.size() - 1].Flags.setInConsecutiveRegsLast();
9441 }
9442 }
9443
9444 SmallVector<SDValue, 4> InVals;
9445 CLI.Chain = LowerCall(CLI, InVals);
9446
9447 // Update CLI.InVals to use outside of this function.
9448 CLI.InVals = InVals;
9449
9450 // Verify that the target's LowerCall behaved as expected.
9451 assert(CLI.Chain.getNode() && CLI.Chain.getValueType() == MVT::Other &&
9452 "LowerCall didn't return a valid chain!");
9453 assert((!CLI.IsTailCall || InVals.empty()) &&
9454 "LowerCall emitted a return value for a tail call!");
9455 assert((CLI.IsTailCall || InVals.size() == CLI.Ins.size()) &&
9456 "LowerCall didn't emit the correct number of values!");
9457
9458 // For a tail call, the return value is merely live-out and there aren't
9459 // any nodes in the DAG representing it. Return a special value to
9460 // indicate that a tail call has been emitted and no more Instructions
9461 // should be processed in the current block.
9462 if (CLI.IsTailCall) {
9463 CLI.DAG.setRoot(CLI.Chain);
9464 return std::make_pair(SDValue(), SDValue());
9465 }
9466
9467#ifndef NDEBUG
9468 for (unsigned i = 0, e = CLI.Ins.size(); i != e; ++i) {
9469 assert(InVals[i].getNode() && "LowerCall emitted a null value!");
9470 assert(EVT(CLI.Ins[i].VT) == InVals[i].getValueType() &&
9471 "LowerCall emitted a value with the wrong type!");
9472 }
9473#endif
9474
9475 SmallVector<SDValue, 4> ReturnValues;
9476 if (!CanLowerReturn) {
9477 // The instruction result is the result of loading from the
9478 // hidden sret parameter.
9479 SmallVector<EVT, 1> PVTs;
9480 Type *PtrRetTy = OrigRetTy->getPointerTo(DL.getAllocaAddrSpace());
9481
9482 ComputeValueVTs(*this, DL, PtrRetTy, PVTs);
9483 assert(PVTs.size() == 1 && "Pointers should fit in one register");
9484 EVT PtrVT = PVTs[0];
9485
9486 unsigned NumValues = RetTys.size();
9487 ReturnValues.resize(NumValues);
9488 SmallVector<SDValue, 4> Chains(NumValues);
9489
9490 // An aggregate return value cannot wrap around the address space, so
9491 // offsets to its parts don't wrap either.
9492 SDNodeFlags Flags;
9493 Flags.setNoUnsignedWrap(true);
9494
9495 MachineFunction &MF = CLI.DAG.getMachineFunction();
9496 Align HiddenSRetAlign = MF.getFrameInfo().getObjectAlign(DemoteStackIdx);
9497 for (unsigned i = 0; i < NumValues; ++i) {
9498 SDValue Add = CLI.DAG.getNode(ISD::ADD, CLI.DL, PtrVT, DemoteStackSlot,
9499 CLI.DAG.getConstant(Offsets[i], CLI.DL,
9500 PtrVT), Flags);
9501 SDValue L = CLI.DAG.getLoad(
9502 RetTys[i], CLI.DL, CLI.Chain, Add,
9503 MachinePointerInfo::getFixedStack(CLI.DAG.getMachineFunction(),
9504 DemoteStackIdx, Offsets[i]),
9505 HiddenSRetAlign);
9506 ReturnValues[i] = L;
9507 Chains[i] = L.getValue(1);
9508 }
9509
9510 CLI.Chain = CLI.DAG.getNode(ISD::TokenFactor, CLI.DL, MVT::Other, Chains);
9511 } else {
9512 // Collect the legal value parts into potentially illegal values
9513 // that correspond to the original function's return values.
9514 Optional<ISD::NodeType> AssertOp;
9515 if (CLI.RetSExt)
9516 AssertOp = ISD::AssertSext;
9517 else if (CLI.RetZExt)
9518 AssertOp = ISD::AssertZext;
9519 unsigned CurReg = 0;
9520 for (unsigned I = 0, E = RetTys.size(); I != E; ++I) {
9521 EVT VT = RetTys[I];
9522 MVT RegisterVT = getRegisterTypeForCallingConv(CLI.RetTy->getContext(),
9523 CLI.CallConv, VT);
9524 unsigned NumRegs = getNumRegistersForCallingConv(CLI.RetTy->getContext(),
9525 CLI.CallConv, VT);
9526
9527 ReturnValues.push_back(getCopyFromParts(CLI.DAG, CLI.DL, &InVals[CurReg],
9528 NumRegs, RegisterVT, VT, nullptr,
9529 CLI.CallConv, AssertOp));
9530 CurReg += NumRegs;
9531 }
9532
9533 // For a function returning void, there is no return value. We can't create
9534 // such a node, so we just return a null return value in that case. In
9535 // that case, nothing will actually look at the value.
9536 if (ReturnValues.empty())
9537 return std::make_pair(SDValue(), CLI.Chain);
9538 }
9539
9540 SDValue Res = CLI.DAG.getNode(ISD::MERGE_VALUES, CLI.DL,
9541 CLI.DAG.getVTList(RetTys), ReturnValues);
9542 return std::make_pair(Res, CLI.Chain);
9543}
9544
9545/// Places new result values for the node in Results (their number
9546/// and types must exactly match those of the original return values of
9547/// the node), or leaves Results empty, which indicates that the node is not
9548/// to be custom lowered after all.
9549void TargetLowering::LowerOperationWrapper(SDNode *N,
9550 SmallVectorImpl<SDValue> &Results,
9551 SelectionDAG &DAG) const {
9552 SDValue Res = LowerOperation(SDValue(N, 0), DAG);
9553
9554 if (!Res.getNode())
9555 return;
9556
9557 // If the original node has one result, take the return value from
9558 // LowerOperation as is. It might not be result number 0.
9559 if (N->getNumValues() == 1) {
9560 Results.push_back(Res);
9561 return;
9562 }
9563
9564 // If the original node has multiple results, then the return node should
9565 // have the same number of results.
9566 assert((N->getNumValues() == Res->getNumValues()) &&
9567 "Lowering returned the wrong number of results!");
9568
9569 // Places new result values base on N result number.
9570 for (unsigned I = 0, E = N->getNumValues(); I != E; ++I)
9571 Results.push_back(Res.getValue(I));
9572}
9573
9574SDValue TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
9575 llvm_unreachable("LowerOperation not implemented for this target!");
9576}
9577
9578void
9579SelectionDAGBuilder::CopyValueToVirtualRegister(const Value *V, unsigned Reg) {
9580 SDValue Op = getNonRegisterValue(V);
9581 assert((Op.getOpcode() != ISD::CopyFromReg ||
9582 cast<RegisterSDNode>(Op.getOperand(1))->getReg() != Reg) &&
9583 "Copy from a reg to the same reg!");
9584 assert(!Register::isPhysicalRegister(Reg) && "Is a physreg");
9585
9586 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
9587 // If this is an InlineAsm we have to match the registers required, not the
9588 // notional registers required by the type.
9589
9590 RegsForValue RFV(V->getContext(), TLI, DAG.getDataLayout(), Reg, V->getType(),
9591 None); // This is not an ABI copy.
9592 SDValue Chain = DAG.getEntryNode();
9593
9594 ISD::NodeType ExtendType = (FuncInfo.PreferredExtendType.find(V) ==
9595 FuncInfo.PreferredExtendType.end())
9596 ? ISD::ANY_EXTEND
9597 : FuncInfo.PreferredExtendType[V];
9598 RFV.getCopyToRegs(Op, DAG, getCurSDLoc(), Chain, nullptr, V, ExtendType);
9599 PendingExports.push_back(Chain);
9600}
9601
9602#include "llvm/CodeGen/SelectionDAGISel.h"
9603
9604/// isOnlyUsedInEntryBlock - If the specified argument is only used in the
9605/// entry block, return true. This includes arguments used by switches, since
9606/// the switch may expand into multiple basic blocks.
9607static bool isOnlyUsedInEntryBlock(const Argument *A, bool FastISel) {
9608 // With FastISel active, we may be splitting blocks, so force creation
9609 // of virtual registers for all non-dead arguments.
9610 if (FastISel)
9611 return A->use_empty();
9612
9613 const BasicBlock &Entry = A->getParent()->front();
9614 for (const User *U : A->users())
9615 if (cast<Instruction>(U)->getParent() != &Entry || isa<SwitchInst>(U))
9616 return false; // Use not in entry block.
9617
9618 return true;
9619}
9620
9621using ArgCopyElisionMapTy =
9622 DenseMap<const Argument *,
9623 std::pair<const AllocaInst *, const StoreInst *>>;
9624
9625/// Scan the entry block of the function in FuncInfo for arguments that look
9626/// like copies into a local alloca. Record any copied arguments in
9627/// ArgCopyElisionCandidates.
9628static void
9629findArgumentCopyElisionCandidates(const DataLayout &DL,
9630 FunctionLoweringInfo *FuncInfo,
9631 ArgCopyElisionMapTy &ArgCopyElisionCandidates) {
9632 // Record the state of every static alloca used in the entry block. Argument
9633 // allocas are all used in the entry block, so we need approximately as many
9634 // entries as we have arguments.
9635 enum StaticAllocaInfo { Unknown, Clobbered, Elidable };
9636 SmallDenseMap<const AllocaInst *, StaticAllocaInfo, 8> StaticAllocas;
9637 unsigned NumArgs = FuncInfo->Fn->arg_size();
9638 StaticAllocas.reserve(NumArgs * 2);
9639
9640 auto GetInfoIfStaticAlloca = [&](const Value *V) -> StaticAllocaInfo * {
9641 if (!V)
9642 return nullptr;
9643 V = V->stripPointerCasts();
9644 const auto *AI = dyn_cast<AllocaInst>(V);
9645 if (!AI || !AI->isStaticAlloca() || !FuncInfo->StaticAllocaMap.count(AI))
9646 return nullptr;
9647 auto Iter = StaticAllocas.insert({AI, Unknown});
9648 return &Iter.first->second;
9649 };
9650
9651 // Look for stores of arguments to static allocas. Look through bitcasts and
9652 // GEPs to handle type coercions, as long as the alloca is fully initialized
9653 // by the store. Any non-store use of an alloca escapes it and any subsequent
9654 // unanalyzed store might write it.
9655 // FIXME: Handle structs initialized with multiple stores.
9656 for (const Instruction &I : FuncInfo->Fn->getEntryBlock()) {
9657 // Look for stores, and handle non-store uses conservatively.
9658 const auto *SI = dyn_cast<StoreInst>(&I);
9659 if (!SI) {
9660 // We will look through cast uses, so ignore them completely.
9661 if (I.isCast())
9662 continue;
9663 // Ignore debug info and pseudo op intrinsics, they don't escape or store
9664 // to allocas.
9665 if (I.isDebugOrPseudoInst())
9666 continue;
9667 // This is an unknown instruction. Assume it escapes or writes to all
9668 // static alloca operands.
9669 for (const Use &U : I.operands()) {
9670 if (StaticAllocaInfo *Info = GetInfoIfStaticAlloca(U))
9671 *Info = StaticAllocaInfo::Clobbered;
9672 }
9673 continue;
9674 }
9675
9676 // If the stored value is a static alloca, mark it as escaped.
9677 if (StaticAllocaInfo *Info = GetInfoIfStaticAlloca(SI->getValueOperand()))
9678 *Info = StaticAllocaInfo::Clobbered;
9679
9680 // Check if the destination is a static alloca.
9681 const Value *Dst = SI->getPointerOperand()->stripPointerCasts();
9682 StaticAllocaInfo *Info = GetInfoIfStaticAlloca(Dst);
9683 if (!Info)
9684 continue;
9685 const AllocaInst *AI = cast<AllocaInst>(Dst);
9686
9687 // Skip allocas that have been initialized or clobbered.
9688 if (*Info != StaticAllocaInfo::Unknown)
9689 continue;
9690
9691 // Check if the stored value is an argument, and that this store fully
9692 // initializes the alloca. Don't elide copies from the same argument twice.
9693 const Value *Val = SI->getValueOperand()->stripPointerCasts();
9694 const auto *Arg = dyn_cast<Argument>(Val);
9695 if (!Arg || Arg->hasPassPointeeByValueCopyAttr() ||
9696 Arg->getType()->isEmptyTy() ||
9697 DL.getTypeStoreSize(Arg->getType()) !=
9698 DL.getTypeAllocSize(AI->getAllocatedType()) ||
9699 ArgCopyElisionCandidates.count(Arg)) {
9700 *Info = StaticAllocaInfo::Clobbered;
9701 continue;
9702 }
9703
9704 LLVM_DEBUG(dbgs() << "Found argument copy elision candidate: " << *AI
9705 << '\n');
9706
9707 // Mark this alloca and store for argument copy elision.
9708 *Info = StaticAllocaInfo::Elidable;
9709 ArgCopyElisionCandidates.insert({Arg, {AI, SI}});
9710
9711 // Stop scanning if we've seen all arguments. This will happen early in -O0
9712 // builds, which is useful, because -O0 builds have large entry blocks and
9713 // many allocas.
9714 if (ArgCopyElisionCandidates.size() == NumArgs)
9715 break;
9716 }
9717}
9718
9719/// Try to elide argument copies from memory into a local alloca. Succeeds if
9720/// ArgVal is a load from a suitable fixed stack object.
9721static void tryToElideArgumentCopy(
9722 FunctionLoweringInfo &FuncInfo, SmallVectorImpl<SDValue> &Chains,
9723 DenseMap<int, int> &ArgCopyElisionFrameIndexMap,
9724 SmallPtrSetImpl<const Instruction *> &ElidedArgCopyInstrs,
9725 ArgCopyElisionMapTy &ArgCopyElisionCandidates, const Argument &Arg,
9726 SDValue ArgVal, bool &ArgHasUses) {
9727 // Check if this is a load from a fixed stack object.
9728 auto *LNode = dyn_cast<LoadSDNode>(ArgVal);
9729 if (!LNode)
9730 return;
9731 auto *FINode = dyn_cast<FrameIndexSDNode>(LNode->getBasePtr().getNode());
9732 if (!FINode)
9733 return;
9734
9735 // Check that the fixed stack object is the right size and alignment.
9736 // Look at the alignment that the user wrote on the alloca instead of looking
9737 // at the stack object.
9738 auto ArgCopyIter = ArgCopyElisionCandidates.find(&Arg);
9739 assert(ArgCopyIter != ArgCopyElisionCandidates.end());
9740 const AllocaInst *AI = ArgCopyIter->second.first;
9741 int FixedIndex = FINode->getIndex();
9742 int &AllocaIndex = FuncInfo.StaticAllocaMap[AI];
9743 int OldIndex = AllocaIndex;
9744 MachineFrameInfo &MFI = FuncInfo.MF->getFrameInfo();
9745 if (MFI.getObjectSize(FixedIndex) != MFI.getObjectSize(OldIndex)) {
9746 LLVM_DEBUG(
9747 dbgs() << " argument copy elision failed due to bad fixed stack "
9748 "object size\n");
9749 return;
9750 }
9751 Align RequiredAlignment = AI->getAlign();
9752 if (MFI.getObjectAlign(FixedIndex) < RequiredAlignment) {
9753 LLVM_DEBUG(dbgs() << " argument copy elision failed: alignment of alloca "
9754 "greater than stack argument alignment ("
9755 << DebugStr(RequiredAlignment) << " vs "
9756 << DebugStr(MFI.getObjectAlign(FixedIndex)) << ")\n");
9757 return;
9758 }
9759
9760 // Perform the elision. Delete the old stack object and replace its only use
9761 // in the variable info map. Mark the stack object as mutable.
9762 LLVM_DEBUG({
9763 dbgs() << "Eliding argument copy from " << Arg << " to " << *AI << '\n'
9764 << " Replacing frame index " << OldIndex << " with " << FixedIndex
9765 << '\n';
9766 });
9767 MFI.RemoveStackObject(OldIndex);
9768 MFI.setIsImmutableObjectIndex(FixedIndex, false);
9769 AllocaIndex = FixedIndex;
9770 ArgCopyElisionFrameIndexMap.insert({OldIndex, FixedIndex});
9771 Chains.push_back(ArgVal.getValue(1));
9772
9773 // Avoid emitting code for the store implementing the copy.
9774 const StoreInst *SI = ArgCopyIter->second.second;
9775 ElidedArgCopyInstrs.insert(SI);
9776
9777 // Check for uses of the argument again so that we can avoid exporting ArgVal
9778 // if it is't used by anything other than the store.
9779 for (const Value *U : Arg.users()) {
9780 if (U != SI) {
9781 ArgHasUses = true;
9782 break;
9783 }
9784 }
9785}
9786
9787void SelectionDAGISel::LowerArguments(const Function &F) {
9788 SelectionDAG &DAG = SDB->DAG;
9789 SDLoc dl = SDB->getCurSDLoc();
9790 const DataLayout &DL = DAG.getDataLayout();
9791 SmallVector<ISD::InputArg, 16> Ins;
9792
9793 // In Naked functions we aren't going to save any registers.
9794 if (F.hasFnAttribute(Attribute::Naked))
9795 return;
9796
9797 if (!FuncInfo->CanLowerReturn) {
9798 // Put in an sret pointer parameter before all the other parameters.
9799 SmallVector<EVT, 1> ValueVTs;
9800 ComputeValueVTs(*TLI, DAG.getDataLayout(),
9801 F.getReturnType()->getPointerTo(
9802 DAG.getDataLayout().getAllocaAddrSpace()),
9803 ValueVTs);
9804
9805 // NOTE: Assuming that a pointer will never break down to more than one VT
9806 // or one register.
9807 ISD::ArgFlagsTy Flags;
9808 Flags.setSRet();
9809 MVT RegisterVT = TLI->getRegisterType(*DAG.getContext(), ValueVTs[0]);
9810 ISD::InputArg RetArg(Flags, RegisterVT, ValueVTs[0], true,
9811 ISD::InputArg::NoArgIndex, 0);
9812 Ins.push_back(RetArg);
9813 }
9814
9815 // Look for stores of arguments to static allocas. Mark such arguments with a
9816 // flag to ask the target to give us the memory location of that argument if
9817 // available.
9818 ArgCopyElisionMapTy ArgCopyElisionCandidates;
9819 findArgumentCopyElisionCandidates(DL, FuncInfo.get(),
9820 ArgCopyElisionCandidates);
9821
9822 // Set up the incoming argument description vector.
9823 for (const Argument &Arg : F.args()) {
9824 unsigned ArgNo = Arg.getArgNo();
9825 SmallVector<EVT, 4> ValueVTs;
9826 ComputeValueVTs(*TLI, DAG.getDataLayout(), Arg.getType(), ValueVTs);
9827 bool isArgValueUsed = !Arg.use_empty();
9828 unsigned PartBase = 0;
9829 Type *FinalType = Arg.getType();
9830 if (Arg.hasAttribute(Attribute::ByVal))
9831 FinalType = Arg.getParamByValType();
9832 bool NeedsRegBlock = TLI->functionArgumentNeedsConsecutiveRegisters(
9833 FinalType, F.getCallingConv(), F.isVarArg());
9834 for (unsigned Value = 0, NumValues = ValueVTs.size();
9835 Value != NumValues; ++Value) {
9836 EVT VT = ValueVTs[Value];
9837 Type *ArgTy = VT.getTypeForEVT(*DAG.getContext());
9838 ISD::ArgFlagsTy Flags;
9839
9840 // Certain targets (such as MIPS), may have a different ABI alignment
9841 // for a type depending on the context. Give the target a chance to
9842 // specify the alignment it wants.
9843 const Align OriginalAlignment(
9844 TLI->getABIAlignmentForCallingConv(ArgTy, DL));
9845
9846 if (Arg.getType()->isPointerTy()) {
9847 Flags.setPointer();
9848 Flags.setPointerAddrSpace(
9849 cast<PointerType>(Arg.getType())->getAddressSpace());
9850 }
9851 if (Arg.hasAttribute(Attribute::ZExt))
9852 Flags.setZExt();
9853 if (Arg.hasAttribute(Attribute::SExt))
9854 Flags.setSExt();
9855 if (Arg.hasAttribute(Attribute::InReg)) {
9856 // If we are using vectorcall calling convention, a structure that is
9857 // passed InReg - is surely an HVA
9858 if (F.getCallingConv() == CallingConv::X86_VectorCall &&
9859 isa<StructType>(Arg.getType())) {
9860 // The first value of a structure is marked
9861 if (0 == Value)
9862 Flags.setHvaStart();
9863 Flags.setHva();
9864 }
9865 // Set InReg Flag
9866 Flags.setInReg();
9867 }
9868 if (Arg.hasAttribute(Attribute::StructRet))
9869 Flags.setSRet();
9870 if (Arg.hasAttribute(Attribute::SwiftSelf))
9871 Flags.setSwiftSelf();
9872 if (Arg.hasAttribute(Attribute::SwiftError))
9873 Flags.setSwiftError();
9874 if (Arg.hasAttribute(Attribute::ByVal))
9875 Flags.setByVal();
9876 if (Arg.hasAttribute(Attribute::ByRef))
9877 Flags.setByRef();
9878 if (Arg.hasAttribute(Attribute::InAlloca)) {
9879 Flags.setInAlloca();
9880 // Set the byval flag for CCAssignFn callbacks that don't know about
9881 // inalloca. This way we can know how many bytes we should've allocated
9882 // and how many bytes a callee cleanup function will pop. If we port
9883 // inalloca to more targets, we'll have to add custom inalloca handling
9884 // in the various CC lowering callbacks.
9885 Flags.setByVal();
9886 }
9887 if (Arg.hasAttribute(Attribute::Preallocated)) {
9888 Flags.setPreallocated();
9889 // Set the byval flag for CCAssignFn callbacks that don't know about
9890 // preallocated. This way we can know how many bytes we should've
9891 // allocated and how many bytes a callee cleanup function will pop. If
9892 // we port preallocated to more targets, we'll have to add custom
9893 // preallocated handling in the various CC lowering callbacks.
9894 Flags.setByVal();
9895 }
9896
9897 Type *ArgMemTy = nullptr;
9898 if (Flags.isByVal() || Flags.isInAlloca() || Flags.isPreallocated() ||
9899 Flags.isByRef()) {
9900 if (!ArgMemTy)
9901 ArgMemTy = Arg.getPointeeInMemoryValueType();
9902
9903 uint64_t MemSize = DL.getTypeAllocSize(ArgMemTy);
9904
9905 // For in-memory arguments, size and alignment should be passed from FE.
9906 // BE will guess if this info is not there but there are cases it cannot
9907 // get right.
9908 MaybeAlign MemAlign = Arg.getParamAlign();
9909 if (!MemAlign)
9910 MemAlign = Align(TLI->getByValTypeAlignment(ArgMemTy, DL));
9911
9912 if (Flags.isByRef()) {
9913 Flags.setByRefSize(MemSize);
9914 Flags.setByRefAlign(*MemAlign);
9915 } else {
9916 Flags.setByValSize(MemSize);
9917 Flags.setByValAlign(*MemAlign);
9918 }
9919 }
9920
9921 if (Arg.hasAttribute(Attribute::Nest))
9922 Flags.setNest();
9923 if (NeedsRegBlock)
9924 Flags.setInConsecutiveRegs();
9925 Flags.setOrigAlign(OriginalAlignment);
9926 if (ArgCopyElisionCandidates.count(&Arg))
9927 Flags.setCopyElisionCandidate();
9928 if (Arg.hasAttribute(Attribute::Returned))
9929 Flags.setReturned();
9930
9931 MVT RegisterVT = TLI->getRegisterTypeForCallingConv(
9932 *CurDAG->getContext(), F.getCallingConv(), VT);
9933 unsigned NumRegs = TLI->getNumRegistersForCallingConv(
9934 *CurDAG->getContext(), F.getCallingConv(), VT);
9935 for (unsigned i = 0; i != NumRegs; ++i) {
9936 // For scalable vectors, use the minimum size; individual targets
9937 // are responsible for handling scalable vector arguments and
9938 // return values.
9939 ISD::InputArg MyFlags(Flags, RegisterVT, VT, isArgValueUsed,
9940 ArgNo, PartBase+i*RegisterVT.getStoreSize().getKnownMinSize());
9941 if (NumRegs > 1 && i == 0)
9942 MyFlags.Flags.setSplit();
9943 // if it isn't first piece, alignment must be 1
9944 else if (i > 0) {
9945 MyFlags.Flags.setOrigAlign(Align(1));
9946 if (i == NumRegs - 1)
9947 MyFlags.Flags.setSplitEnd();
9948 }
9949 Ins.push_back(MyFlags);
9950 }
9951 if (NeedsRegBlock && Value == NumValues - 1)
9952 Ins[Ins.size() - 1].Flags.setInConsecutiveRegsLast();
9953 PartBase += VT.getStoreSize().getKnownMinSize();
9954 }
9955 }
9956
9957 // Call the target to set up the argument values.
9958 SmallVector<SDValue, 8> InVals;
9959 SDValue NewRoot = TLI->LowerFormalArguments(
9960 DAG.getRoot(), F.getCallingConv(), F.isVarArg(), Ins, dl, DAG, InVals);
9961
9962 // Verify that the target's LowerFormalArguments behaved as expected.
9963 assert(NewRoot.getNode() && NewRoot.getValueType() == MVT::Other &&
9964 "LowerFormalArguments didn't return a valid chain!");
9965 assert(InVals.size() == Ins.size() &&
9966 "LowerFormalArguments didn't emit the correct number of values!");
9967 LLVM_DEBUG({
9968 for (unsigned i = 0, e = Ins.size(); i != e; ++i) {
9969 assert(InVals[i].getNode() &&
9970 "LowerFormalArguments emitted a null value!");
9971 assert(EVT(Ins[i].VT) == InVals[i].getValueType() &&
9972 "LowerFormalArguments emitted a value with the wrong type!");
9973 }
9974 });
9975
9976 // Update the DAG with the new chain value resulting from argument lowering.
9977 DAG.setRoot(NewRoot);
9978
9979 // Set up the argument values.
9980 unsigned i = 0;
9981 if (!FuncInfo->CanLowerReturn) {
9982 // Create a virtual register for the sret pointer, and put in a copy
9983 // from the sret argument into it.
9984 SmallVector<EVT, 1> ValueVTs;
9985 ComputeValueVTs(*TLI, DAG.getDataLayout(),
9986 F.getReturnType()->getPointerTo(
9987 DAG.getDataLayout().getAllocaAddrSpace()),
9988 ValueVTs);
9989 MVT VT = ValueVTs[0].getSimpleVT();
9990 MVT RegVT = TLI->getRegisterType(*CurDAG->getContext(), VT);
9991 Optional<ISD::NodeType> AssertOp = None;
9992 SDValue ArgValue = getCopyFromParts(DAG, dl, &InVals[0], 1, RegVT, VT,
9993 nullptr, F.getCallingConv(), AssertOp);
9994
9995 MachineFunction& MF = SDB->DAG.getMachineFunction();
9996 MachineRegisterInfo& RegInfo = MF.getRegInfo();
9997 Register SRetReg =
9998 RegInfo.createVirtualRegister(TLI->getRegClassFor(RegVT));
9999 FuncInfo->DemoteRegister = SRetReg;
10000 NewRoot =
10001 SDB->DAG.getCopyToReg(NewRoot, SDB->getCurSDLoc(), SRetReg, ArgValue);
10002 DAG.setRoot(NewRoot);
10003
10004 // i indexes lowered arguments. Bump it past the hidden sret argument.
10005 ++i;
10006 }
10007
10008 SmallVector<SDValue, 4> Chains;
10009 DenseMap<int, int> ArgCopyElisionFrameIndexMap;
10010 for (const Argument &Arg : F.args()) {
10011 SmallVector<SDValue, 4> ArgValues;
10012 SmallVector<EVT, 4> ValueVTs;
10013 ComputeValueVTs(*TLI, DAG.getDataLayout(), Arg.getType(), ValueVTs);
10014 unsigned NumValues = ValueVTs.size();
10015 if (NumValues == 0)
10016 continue;
10017
10018 bool ArgHasUses = !Arg.use_empty();
10019
10020 // Elide the copying store if the target loaded this argument from a
10021 // suitable fixed stack object.
10022 if (Ins[i].Flags.isCopyElisionCandidate()) {
10023 tryToElideArgumentCopy(*FuncInfo, Chains, ArgCopyElisionFrameIndexMap,
10024 ElidedArgCopyInstrs, ArgCopyElisionCandidates, Arg,
10025 InVals[i], ArgHasUses);
10026 }
10027
10028 // If this argument is unused then remember its value. It is used to generate
10029 // debugging information.
10030 bool isSwiftErrorArg =
10031 TLI->supportSwiftError() &&
10032 Arg.hasAttribute(Attribute::SwiftError);
10033 if (!ArgHasUses && !isSwiftErrorArg) {
10034 SDB->setUnusedArgValue(&Arg, InVals[i]);
10035
10036 // Also remember any frame index for use in FastISel.
10037 if (FrameIndexSDNode *FI =
10038 dyn_cast<FrameIndexSDNode>(InVals[i].getNode()))
10039 FuncInfo->setArgumentFrameIndex(&Arg, FI->getIndex());
10040 }
10041
10042 for (unsigned Val = 0; Val != NumValues; ++Val) {
10043 EVT VT = ValueVTs[Val];
10044 MVT PartVT = TLI->getRegisterTypeForCallingConv(*CurDAG->getContext(),
10045 F.getCallingConv(), VT);
10046 unsigned NumParts = TLI->getNumRegistersForCallingConv(
10047 *CurDAG->getContext(), F.getCallingConv(), VT);
10048
10049 // Even an apparent 'unused' swifterror argument needs to be returned. So
10050 // we do generate a copy for it that can be used on return from the
10051 // function.
10052 if (ArgHasUses || isSwiftErrorArg) {
10053 Optional<ISD::NodeType> AssertOp;
10054 if (Arg.hasAttribute(Attribute::SExt))
10055 AssertOp = ISD::AssertSext;
10056 else if (Arg.hasAttribute(Attribute::ZExt))
10057 AssertOp = ISD::AssertZext;
10058
10059 ArgValues.push_back(getCopyFromParts(DAG, dl, &InVals[i], NumParts,
10060 PartVT, VT, nullptr,
10061 F.getCallingConv(), AssertOp));
10062 }
10063
10064 i += NumParts;
10065 }
10066
10067 // We don't need to do anything else for unused arguments.
10068 if (ArgValues.empty())
10069 continue;
10070
10071 // Note down frame index.
10072 if (FrameIndexSDNode *FI =
10073 dyn_cast<FrameIndexSDNode>(ArgValues[0].getNode()))
10074 FuncInfo->setArgumentFrameIndex(&Arg, FI->getIndex());
10075
10076 SDValue Res = DAG.getMergeValues(makeArrayRef(ArgValues.data(), NumValues),
10077 SDB->getCurSDLoc());
10078
10079 SDB->setValue(&Arg, Res);
10080 if (!TM.Options.EnableFastISel && Res.getOpcode() == ISD::BUILD_PAIR) {
10081 // We want to associate the argument with the frame index, among
10082 // involved operands, that correspond to the lowest address. The
10083 // getCopyFromParts function, called earlier, is swapping the order of
10084 // the operands to BUILD_PAIR depending on endianness. The result of
10085 // that swapping is that the least significant bits of the argument will
10086 // be in the first operand of the BUILD_PAIR node, and the most
10087 // significant bits will be in the second operand.
10088 unsigned LowAddressOp = DAG.getDataLayout().isBigEndian() ? 1 : 0;
10089 if (LoadSDNode *LNode =
10090 dyn_cast<LoadSDNode>(Res.getOperand(LowAddressOp).getNode()))
10091 if (FrameIndexSDNode *FI =
10092 dyn_cast<FrameIndexSDNode>(LNode->getBasePtr().getNode()))
10093 FuncInfo->setArgumentFrameIndex(&Arg, FI->getIndex());
10094 }
10095
10096 // Analyses past this point are naive and don't expect an assertion.
10097 if (Res.getOpcode() == ISD::AssertZext)
10098 Res = Res.getOperand(0);
10099
10100 // Update the SwiftErrorVRegDefMap.
10101 if (Res.getOpcode() == ISD::CopyFromReg && isSwiftErrorArg) {
10102 unsigned Reg = cast<RegisterSDNode>(Res.getOperand(1))->getReg();
10103 if (Register::isVirtualRegister(Reg))
10104 SwiftError->setCurrentVReg(FuncInfo->MBB, SwiftError->getFunctionArg(),
10105 Reg);
10106 }
10107
10108 // If this argument is live outside of the entry block, insert a copy from
10109 // wherever we got it to the vreg that other BB's will reference it as.
10110 if (Res.getOpcode() == ISD::CopyFromReg) {
10111 // If we can, though, try to skip creating an unnecessary vreg.
10112 // FIXME: This isn't very clean... it would be nice to make this more
10113 // general.
10114 unsigned Reg = cast<RegisterSDNode>(Res.getOperand(1))->getReg();
10115 if (Register::isVirtualRegister(Reg)) {
10116 FuncInfo->ValueMap[&Arg] = Reg;
10117 continue;
10118 }
10119 }
10120 if (!isOnlyUsedInEntryBlock(&Arg, TM.Options.EnableFastISel)) {
10121 FuncInfo->InitializeRegForValue(&Arg);
10122 SDB->CopyToExportRegsIfNeeded(&Arg);
10123 }
10124 }
10125
10126 if (!Chains.empty()) {
10127 Chains.push_back(NewRoot);
10128 NewRoot = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Chains);
10129 }
10130
10131 DAG.setRoot(NewRoot);
10132
10133 assert(i == InVals.size() && "Argument register count mismatch!");
10134
10135 // If any argument copy elisions occurred and we have debug info, update the
10136 // stale frame indices used in the dbg.declare variable info table.
10137 MachineFunction::VariableDbgInfoMapTy &DbgDeclareInfo = MF->getVariableDbgInfo();
10138 if (!DbgDeclareInfo.empty() && !ArgCopyElisionFrameIndexMap.empty()) {
10139 for (MachineFunction::VariableDbgInfo &VI : DbgDeclareInfo) {
10140 auto I = ArgCopyElisionFrameIndexMap.find(VI.Slot);
10141 if (I != ArgCopyElisionFrameIndexMap.end())
10142 VI.Slot = I->second;
10143 }
10144 }
10145
10146 // Finally, if the target has anything special to do, allow it to do so.
10147 emitFunctionEntryCode();
10148}
10149
10150/// Handle PHI nodes in successor blocks. Emit code into the SelectionDAG to
10151/// ensure constants are generated when needed. Remember the virtual registers
10152/// that need to be added to the Machine PHI nodes as input. We cannot just
10153/// directly add them, because expansion might result in multiple MBB's for one
10154/// BB. As such, the start of the BB might correspond to a different MBB than
10155/// the end.
10156void
10157SelectionDAGBuilder::HandlePHINodesInSuccessorBlocks(const BasicBlock *LLVMBB) {
10158 const Instruction *TI = LLVMBB->getTerminator();
10159
10160 SmallPtrSet<MachineBasicBlock *, 4> SuccsHandled;
10161
10162 // Check PHI nodes in successors that expect a value to be available from this
10163 // block.
10164 for (unsigned succ = 0, e = TI->getNumSuccessors(); succ != e; ++succ) {
10165 const BasicBlock *SuccBB = TI->getSuccessor(succ);
10166 if (!isa<PHINode>(SuccBB->begin())) continue;
10167 MachineBasicBlock *SuccMBB = FuncInfo.MBBMap[SuccBB];
10168
10169 // If this terminator has multiple identical successors (common for
10170 // switches), only handle each succ once.
10171 if (!SuccsHandled.insert(SuccMBB).second)
10172 continue;
10173
10174 MachineBasicBlock::iterator MBBI = SuccMBB->begin();
10175
10176 // At this point we know that there is a 1-1 correspondence between LLVM PHI
10177 // nodes and Machine PHI nodes, but the incoming operands have not been
10178 // emitted yet.
10179 for (const PHINode &PN : SuccBB->phis()) {
10180 // Ignore dead phi's.
10181 if (PN.use_empty())
10182 continue;
10183
10184 // Skip empty types
10185 if (PN.getType()->isEmptyTy())
10186 continue;
10187
10188 unsigned Reg;
10189 const Value *PHIOp = PN.getIncomingValueForBlock(LLVMBB);
10190
10191 if (const Constant *C = dyn_cast<Constant>(PHIOp)) {
10192 unsigned &RegOut = ConstantsOut[C];
10193 if (RegOut == 0) {
10194 RegOut = FuncInfo.CreateRegs(C);
10195 CopyValueToVirtualRegister(C, RegOut);
10196 }
10197 Reg = RegOut;
10198 } else {
10199 DenseMap<const Value *, Register>::iterator I =
10200 FuncInfo.ValueMap.find(PHIOp);
10201 if (I != FuncInfo.ValueMap.end())
10202 Reg = I->second;
10203 else {
10204 assert(isa<AllocaInst>(PHIOp) &&
10205 FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(PHIOp)) &&
10206 "Didn't codegen value into a register!??");
10207 Reg = FuncInfo.CreateRegs(PHIOp);
10208 CopyValueToVirtualRegister(PHIOp, Reg);
10209 }
10210 }
10211
10212 // Remember that this register needs to added to the machine PHI node as
10213 // the input for this MBB.
10214 SmallVector<EVT, 4> ValueVTs;
10215 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
10216 ComputeValueVTs(TLI, DAG.getDataLayout(), PN.getType(), ValueVTs);
10217 for (unsigned vti = 0, vte = ValueVTs.size(); vti != vte; ++vti) {
10218 EVT VT = ValueVTs[vti];
10219 unsigned NumRegisters = TLI.getNumRegisters(*DAG.getContext(), VT);
10220 for (unsigned i = 0, e = NumRegisters; i != e; ++i)
10221 FuncInfo.PHINodesToUpdate.push_back(
10222 std::make_pair(&*MBBI++, Reg + i));
10223 Reg += NumRegisters;
10224 }
10225 }
10226 }
10227
10228 ConstantsOut.clear();
10229}
10230
10231/// Add a successor MBB to ParentMBB< creating a new MachineBB for BB if SuccMBB
10232/// is 0.
10233MachineBasicBlock *
10234SelectionDAGBuilder::StackProtectorDescriptor::
10235AddSuccessorMBB(const BasicBlock *BB,
10236 MachineBasicBlock *ParentMBB,
10237 bool IsLikely,
10238 MachineBasicBlock *SuccMBB) {
10239 // If SuccBB has not been created yet, create it.
10240 if (!SuccMBB) {
10241 MachineFunction *MF = ParentMBB->getParent();
10242 MachineFunction::iterator BBI(ParentMBB);
10243 SuccMBB = MF->CreateMachineBasicBlock(BB);
10244 MF->insert(++BBI, SuccMBB);
10245 }
10246 // Add it as a successor of ParentMBB.
10247 ParentMBB->addSuccessor(
10248 SuccMBB, BranchProbabilityInfo::getBranchProbStackProtector(IsLikely));
10249 return SuccMBB;
10250}
10251
10252MachineBasicBlock *SelectionDAGBuilder::NextBlock(MachineBasicBlock *MBB) {
10253 MachineFunction::iterator I(MBB);
10254 if (++I == FuncInfo.MF->end())
10255 return nullptr;
10256 return &*I;
10257}
10258
10259/// During lowering new call nodes can be created (such as memset, etc.).
10260/// Those will become new roots of the current DAG, but complications arise
10261/// when they are tail calls. In such cases, the call lowering will update
10262/// the root, but the builder still needs to know that a tail call has been
10263/// lowered in order to avoid generating an additional return.
10264void SelectionDAGBuilder::updateDAGForMaybeTailCall(SDValue MaybeTC) {
10265 // If the node is null, we do have a tail call.
10266 if (MaybeTC.getNode() != nullptr)
10267 DAG.setRoot(MaybeTC);
10268 else
10269 HasTailCall = true;
10270}
10271
10272void SelectionDAGBuilder::lowerWorkItem(SwitchWorkListItem W, Value *Cond,
10273 MachineBasicBlock *SwitchMBB,
10274 MachineBasicBlock *DefaultMBB) {
10275 MachineFunction *CurMF = FuncInfo.MF;
10276 MachineBasicBlock *NextMBB = nullptr;
10277 MachineFunction::iterator BBI(W.MBB);
10278 if (++BBI != FuncInfo.MF->end())
10279 NextMBB = &*BBI;
10280
10281 unsigned Size = W.LastCluster - W.FirstCluster + 1;
10282
10283 BranchProbabilityInfo *BPI = FuncInfo.BPI;
10284
10285 if (Size == 2 && W.MBB == SwitchMBB) {
10286 // If any two of the cases has the same destination, and if one value
10287 // is the same as the other, but has one bit unset that the other has set,
10288 // use bit manipulation to do two compares at once. For example:
10289 // "if (X == 6 || X == 4)" -> "if ((X|2) == 6)"
10290 // TODO: This could be extended to merge any 2 cases in switches with 3
10291 // cases.
10292 // TODO: Handle cases where W.CaseBB != SwitchBB.
10293 CaseCluster &Small = *W.FirstCluster;
10294 CaseCluster &Big = *W.LastCluster;
10295
10296 if (Small.Low == Small.High && Big.Low == Big.High &&
10297 Small.MBB == Big.MBB) {
10298 const APInt &SmallValue = Small.Low->getValue();
10299 const APInt &BigValue = Big.Low->getValue();
10300
10301 // Check that there is only one bit different.
10302 APInt CommonBit = BigValue ^ SmallValue;
10303 if (CommonBit.isPowerOf2()) {
10304 SDValue CondLHS = getValue(Cond);
10305 EVT VT = CondLHS.getValueType();
10306 SDLoc DL = getCurSDLoc();
10307
10308 SDValue Or = DAG.getNode(ISD::OR, DL, VT, CondLHS,
10309 DAG.getConstant(CommonBit, DL, VT));
10310 SDValue Cond = DAG.getSetCC(
10311 DL, MVT::i1, Or, DAG.getConstant(BigValue | SmallValue, DL, VT),
10312 ISD::SETEQ);
10313
10314 // Update successor info.
10315 // Both Small and Big will jump to Small.BB, so we sum up the
10316 // probabilities.
10317 addSuccessorWithProb(SwitchMBB, Small.MBB, Small.Prob + Big.Prob);
10318 if (BPI)
10319 addSuccessorWithProb(
10320 SwitchMBB, DefaultMBB,
10321 // The default destination is the first successor in IR.
10322 BPI->getEdgeProbability(SwitchMBB->getBasicBlock(), (unsigned)0));
10323 else
10324 addSuccessorWithProb(SwitchMBB, DefaultMBB);
10325
10326 // Insert the true branch.
10327 SDValue BrCond =
10328 DAG.getNode(ISD::BRCOND, DL, MVT::Other, getControlRoot(), Cond,
10329 DAG.getBasicBlock(Small.MBB));
10330 // Insert the false branch.
10331 BrCond = DAG.getNode(ISD::BR, DL, MVT::Other, BrCond,
10332 DAG.getBasicBlock(DefaultMBB));
10333
10334 DAG.setRoot(BrCond);
10335 return;
10336 }
10337 }
10338 }
10339
10340 if (TM.getOptLevel() != CodeGenOpt::None) {
10341 // Here, we order cases by probability so the most likely case will be
10342 // checked first. However, two clusters can have the same probability in
10343 // which case their relative ordering is non-deterministic. So we use Low
10344 // as a tie-breaker as clusters are guaranteed to never overlap.
10345 llvm::sort(W.FirstCluster, W.LastCluster + 1,
10346 [](const CaseCluster &a, const CaseCluster &b) {
10347 return a.Prob != b.Prob ?
10348 a.Prob > b.Prob :
10349 a.Low->getValue().slt(b.Low->getValue());
10350 });
10351
10352 // Rearrange the case blocks so that the last one falls through if possible
10353 // without changing the order of probabilities.
10354 for (CaseClusterIt I = W.LastCluster; I > W.FirstCluster; ) {
10355 --I;
10356 if (I->Prob > W.LastCluster->Prob)
10357 break;
10358 if (I->Kind == CC_Range && I->MBB == NextMBB) {
10359 std::swap(*I, *W.LastCluster);
10360 break;
10361 }
10362 }
10363 }
10364
10365 // Compute total probability.
10366 BranchProbability DefaultProb = W.DefaultProb;
10367 BranchProbability UnhandledProbs = DefaultProb;
10368 for (CaseClusterIt I = W.FirstCluster; I <= W.LastCluster; ++I)
10369 UnhandledProbs += I->Prob;
10370
10371 MachineBasicBlock *CurMBB = W.MBB;
10372 for (CaseClusterIt I = W.FirstCluster, E = W.LastCluster; I <= E; ++I) {
10373 bool FallthroughUnreachable = false;
10374 MachineBasicBlock *Fallthrough;
10375 if (I == W.LastCluster) {
10376 // For the last cluster, fall through to the default destination.
10377 Fallthrough = DefaultMBB;
10378 FallthroughUnreachable = isa<UnreachableInst>(
10379 DefaultMBB->getBasicBlock()->getFirstNonPHIOrDbg());
10380 } else {
10381 Fallthrough = CurMF->CreateMachineBasicBlock(CurMBB->getBasicBlock());
10382 CurMF->insert(BBI, Fallthrough);
10383 // Put Cond in a virtual register to make it available from the new blocks.
10384 ExportFromCurrentBlock(Cond);
10385 }
10386 UnhandledProbs -= I->Prob;
10387
10388 switch (I->Kind) {
10389 case CC_JumpTable: {
10390 // FIXME: Optimize away range check based on pivot comparisons.
10391 JumpTableHeader *JTH = &SL->JTCases[I->JTCasesIndex].first;
10392 SwitchCG::JumpTable *JT = &SL->JTCases[I->JTCasesIndex].second;
10393
10394 // The jump block hasn't been inserted yet; insert it here.
10395 MachineBasicBlock *JumpMBB = JT->MBB;
10396 CurMF->insert(BBI, JumpMBB);
10397
10398 auto JumpProb = I->Prob;
10399 auto FallthroughProb = UnhandledProbs;
10400
10401 // If the default statement is a target of the jump table, we evenly
10402 // distribute the default probability to successors of CurMBB. Also
10403 // update the probability on the edge from JumpMBB to Fallthrough.
10404 for (MachineBasicBlock::succ_iterator SI = JumpMBB->succ_begin(),
10405 SE = JumpMBB->succ_end();
10406 SI != SE; ++SI) {
10407 if (*SI == DefaultMBB) {
10408 JumpProb += DefaultProb / 2;
10409 FallthroughProb -= DefaultProb / 2;
10410 JumpMBB->setSuccProbability(SI, DefaultProb / 2);
10411 JumpMBB->normalizeSuccProbs();
10412 break;
10413 }
10414 }
10415
10416 if (FallthroughUnreachable) {
10417 // Skip the range check if the fallthrough block is unreachable.
10418 JTH->OmitRangeCheck = true;
10419 }
10420
10421 if (!JTH->OmitRangeCheck)
10422 addSuccessorWithProb(CurMBB, Fallthrough, FallthroughProb);
10423 addSuccessorWithProb(CurMBB, JumpMBB, JumpProb);
10424 CurMBB->normalizeSuccProbs();
10425
10426 // The jump table header will be inserted in our current block, do the
10427 // range check, and fall through to our fallthrough block.
10428 JTH->HeaderBB = CurMBB;
10429 JT->Default = Fallthrough; // FIXME: Move Default to JumpTableHeader.
10430
10431 // If we're in the right place, emit the jump table header right now.
10432 if (CurMBB == SwitchMBB) {
10433 visitJumpTableHeader(*JT, *JTH, SwitchMBB);
10434 JTH->Emitted = true;
10435 }
10436 break;
10437 }
10438 case CC_BitTests: {
10439 // FIXME: Optimize away range check based on pivot comparisons.
10440 BitTestBlock *BTB = &SL->BitTestCases[I->BTCasesIndex];
10441
10442 // The bit test blocks haven't been inserted yet; insert them here.
10443 for (BitTestCase &BTC : BTB->Cases)
10444 CurMF->insert(BBI, BTC.ThisBB);
10445
10446 // Fill in fields of the BitTestBlock.
10447 BTB->Parent = CurMBB;
10448 BTB->Default = Fallthrough;
10449
10450 BTB->DefaultProb = UnhandledProbs;
10451 // If the cases in bit test don't form a contiguous range, we evenly
10452 // distribute the probability on the edge to Fallthrough to two
10453 // successors of CurMBB.
10454 if (!BTB->ContiguousRange) {
10455 BTB->Prob += DefaultProb / 2;
10456 BTB->DefaultProb -= DefaultProb / 2;
10457 }
10458
10459 if (FallthroughUnreachable) {
10460 // Skip the range check if the fallthrough block is unreachable.
10461 BTB->OmitRangeCheck = true;
10462 }
10463
10464 // If we're in the right place, emit the bit test header right now.
10465 if (CurMBB == SwitchMBB) {
10466 visitBitTestHeader(*BTB, SwitchMBB);
10467 BTB->Emitted = true;
10468 }
10469 break;
10470 }
10471 case CC_Range: {
10472 const Value *RHS, *LHS, *MHS;
10473 ISD::CondCode CC;
10474 if (I->Low == I->High) {
10475 // Check Cond == I->Low.
10476 CC = ISD::SETEQ;
10477 LHS = Cond;
10478 RHS=I->Low;
10479 MHS = nullptr;
10480 } else {
10481 // Check I->Low <= Cond <= I->High.
10482 CC = ISD::SETLE;
10483 LHS = I->Low;
10484 MHS = Cond;
10485 RHS = I->High;
10486 }
10487
10488 // If Fallthrough is unreachable, fold away the comparison.
10489 if (FallthroughUnreachable)
10490 CC = ISD::SETTRUE;
10491
10492 // The false probability is the sum of all unhandled cases.
10493 CaseBlock CB(CC, LHS, RHS, MHS, I->MBB, Fallthrough, CurMBB,
10494 getCurSDLoc(), I->Prob, UnhandledProbs);
10495
10496 if (CurMBB == SwitchMBB)
10497 visitSwitchCase(CB, SwitchMBB);
10498 else
10499 SL->SwitchCases.push_back(CB);
10500
10501 break;
10502 }
10503 }
10504 CurMBB = Fallthrough;
10505 }
10506}
10507
10508unsigned SelectionDAGBuilder::caseClusterRank(const CaseCluster &CC,
10509 CaseClusterIt First,
10510 CaseClusterIt Last) {
10511 return std::count_if(First, Last + 1, [&](const CaseCluster &X) {
10512 if (X.Prob != CC.Prob)
10513 return X.Prob > CC.Prob;
10514
10515 // Ties are broken by comparing the case value.
10516 return X.Low->getValue().slt(CC.Low->getValue());
10517 });
10518}
10519
10520void SelectionDAGBuilder::splitWorkItem(SwitchWorkList &WorkList,
10521 const SwitchWorkListItem &W,
10522 Value *Cond,
10523 MachineBasicBlock *SwitchMBB) {
10524 assert(W.FirstCluster->Low->getValue().slt(W.LastCluster->Low->getValue()) &&
10525 "Clusters not sorted?");
10526
10527 assert(W.LastCluster - W.FirstCluster + 1 >= 2 && "Too small to split!");
10528
10529 // Balance the tree based on branch probabilities to create a near-optimal (in
10530 // terms of search time given key frequency) binary search tree. See e.g. Kurt
10531 // Mehlhorn "Nearly Optimal Binary Search Trees" (1975).
10532 CaseClusterIt LastLeft = W.FirstCluster;
10533 CaseClusterIt FirstRight = W.LastCluster;
10534 auto LeftProb = LastLeft->Prob + W.DefaultProb / 2;
10535 auto RightProb = FirstRight->Prob + W.DefaultProb / 2;
10536
10537 // Move LastLeft and FirstRight towards each other from opposite directions to
10538 // find a partitioning of the clusters which balances the probability on both
10539 // sides. If LeftProb and RightProb are equal, alternate which side is
10540 // taken to ensure 0-probability nodes are distributed evenly.
10541 unsigned I = 0;
10542 while (LastLeft + 1 < FirstRight) {
10543 if (LeftProb < RightProb || (LeftProb == RightProb && (I & 1)))
10544 LeftProb += (++LastLeft)->Prob;
10545 else
10546 RightProb += (--FirstRight)->Prob;
10547 I++;
10548 }
10549
10550 while (true) {
10551 // Our binary search tree differs from a typical BST in that ours can have up
10552 // to three values in each leaf. The pivot selection above doesn't take that
10553 // into account, which means the tree might require more nodes and be less
10554 // efficient. We compensate for this here.
10555
10556 unsigned NumLeft = LastLeft - W.FirstCluster + 1;
10557 unsigned NumRight = W.LastCluster - FirstRight + 1;
10558
10559 if (std::min(NumLeft, NumRight) < 3 && std::max(NumLeft, NumRight) > 3) {
10560 // If one side has less than 3 clusters, and the other has more than 3,
10561 // consider taking a cluster from the other side.
10562
10563 if (NumLeft < NumRight) {
10564 // Consider moving the first cluster on the right to the left side.
10565 CaseCluster &CC = *FirstRight;
10566 unsigned RightSideRank = caseClusterRank(CC, FirstRight, W.LastCluster);
10567 unsigned LeftSideRank = caseClusterRank(CC, W.FirstCluster, LastLeft);
10568 if (LeftSideRank <= RightSideRank) {
10569 // Moving the cluster to the left does not demote it.
10570 ++LastLeft;
10571 ++FirstRight;
10572 continue;
10573 }
10574 } else {
10575 assert(NumRight < NumLeft);
10576 // Consider moving the last element on the left to the right side.
10577 CaseCluster &CC = *LastLeft;
10578 unsigned LeftSideRank = caseClusterRank(CC, W.FirstCluster, LastLeft);
10579 unsigned RightSideRank = caseClusterRank(CC, FirstRight, W.LastCluster);
10580 if (RightSideRank <= LeftSideRank) {
10581 // Moving the cluster to the right does not demot it.
10582 --LastLeft;
10583 --FirstRight;
10584 continue;
10585 }
10586 }
10587 }
10588 break;
10589 }
10590
10591 assert(LastLeft + 1 == FirstRight);
10592 assert(LastLeft >= W.FirstCluster);
10593 assert(FirstRight <= W.LastCluster);
10594
10595 // Use the first element on the right as pivot since we will make less-than
10596 // comparisons against it.
10597 CaseClusterIt PivotCluster = FirstRight;
10598 assert(PivotCluster > W.FirstCluster);
10599 assert(PivotCluster <= W.LastCluster);
10600
10601 CaseClusterIt FirstLeft = W.FirstCluster;
10602 CaseClusterIt LastRight = W.LastCluster;
10603
10604 const ConstantInt *Pivot = PivotCluster->Low;
10605
10606 // New blocks will be inserted immediately after the current one.
10607 MachineFunction::iterator BBI(W.MBB);
10608 ++BBI;
10609
10610 // We will branch to the LHS if Value < Pivot. If LHS is a single cluster,
10611 // we can branch to its destination directly if it's squeezed exactly in
10612 // between the known lower bound and Pivot - 1.
10613 MachineBasicBlock *LeftMBB;
10614 if (FirstLeft == LastLeft && FirstLeft->Kind == CC_Range &&
10615 FirstLeft->Low == W.GE &&
10616 (FirstLeft->High->getValue() + 1LL) == Pivot->getValue()) {
10617 LeftMBB = FirstLeft->MBB;
10618 } else {
10619 LeftMBB = FuncInfo.MF->CreateMachineBasicBlock(W.MBB->getBasicBlock());
10620 FuncInfo.MF->insert(BBI, LeftMBB);
10621 WorkList.push_back(
10622 {LeftMBB, FirstLeft, LastLeft, W.GE, Pivot, W.DefaultProb / 2});
10623 // Put Cond in a virtual register to make it available from the new blocks.
10624 ExportFromCurrentBlock(Cond);
10625 }
10626
10627 // Similarly, we will branch to the RHS if Value >= Pivot. If RHS is a
10628 // single cluster, RHS.Low == Pivot, and we can branch to its destination
10629 // directly if RHS.High equals the current upper bound.
10630 MachineBasicBlock *RightMBB;
10631 if (FirstRight == LastRight && FirstRight->Kind == CC_Range &&
10632 W.LT && (FirstRight->High->getValue() + 1ULL) == W.LT->getValue()) {
10633 RightMBB = FirstRight->MBB;
10634 } else {
10635 RightMBB = FuncInfo.MF->CreateMachineBasicBlock(W.MBB->getBasicBlock());
10636 FuncInfo.MF->insert(BBI, RightMBB);
10637 WorkList.push_back(
10638 {RightMBB, FirstRight, LastRight, Pivot, W.LT, W.DefaultProb / 2});
10639 // Put Cond in a virtual register to make it available from the new blocks.
10640 ExportFromCurrentBlock(Cond);
10641 }
10642
10643 // Create the CaseBlock record that will be used to lower the branch.
10644 CaseBlock CB(ISD::SETLT, Cond, Pivot, nullptr, LeftMBB, RightMBB, W.MBB,
10645 getCurSDLoc(), LeftProb, RightProb);
10646
10647 if (W.MBB == SwitchMBB)
10648 visitSwitchCase(CB, SwitchMBB);
10649 else
10650 SL->SwitchCases.push_back(CB);
10651}
10652
10653// Scale CaseProb after peeling a case with the probablity of PeeledCaseProb
10654// from the swith statement.
10655static BranchProbability scaleCaseProbality(BranchProbability CaseProb,
10656 BranchProbability PeeledCaseProb) {
10657 if (PeeledCaseProb == BranchProbability::getOne())
10658 return BranchProbability::getZero();
10659 BranchProbability SwitchProb = PeeledCaseProb.getCompl();
10660
10661 uint32_t Numerator = CaseProb.getNumerator();
10662 uint32_t Denominator = SwitchProb.scale(CaseProb.getDenominator());
10663 return BranchProbability(Numerator, std::max(Numerator, Denominator));
10664}
10665
10666// Try to peel the top probability case if it exceeds the threshold.
10667// Return current MachineBasicBlock for the switch statement if the peeling
10668// does not occur.
10669// If the peeling is performed, return the newly created MachineBasicBlock
10670// for the peeled switch statement. Also update Clusters to remove the peeled
10671// case. PeeledCaseProb is the BranchProbability for the peeled case.
10672MachineBasicBlock *SelectionDAGBuilder::peelDominantCaseCluster(
10673 const SwitchInst &SI, CaseClusterVector &Clusters,
10674 BranchProbability &PeeledCaseProb) {
10675 MachineBasicBlock *SwitchMBB = FuncInfo.MBB;
10676 // Don't perform if there is only one cluster or optimizing for size.
10677 if (SwitchPeelThreshold > 100 || !FuncInfo.BPI || Clusters.size() < 2 ||
10678 TM.getOptLevel() == CodeGenOpt::None ||
10679 SwitchMBB->getParent()->getFunction().hasMinSize())
10680 return SwitchMBB;
10681
10682 BranchProbability TopCaseProb = BranchProbability(SwitchPeelThreshold, 100);
10683 unsigned PeeledCaseIndex = 0;
10684 bool SwitchPeeled = false;
10685 for (unsigned Index = 0; Index < Clusters.size(); ++Index) {
10686 CaseCluster &CC = Clusters[Index];
10687 if (CC.Prob < TopCaseProb)
10688 continue;
10689 TopCaseProb = CC.Prob;
10690 PeeledCaseIndex = Index;
10691 SwitchPeeled = true;
10692 }
10693 if (!SwitchPeeled)
10694 return SwitchMBB;
10695
10696 LLVM_DEBUG(dbgs() << "Peeled one top case in switch stmt, prob: "
10697 << TopCaseProb << "\n");
10698
10699 // Record the MBB for the peeled switch statement.
10700 MachineFunction::iterator BBI(SwitchMBB);
10701 ++BBI;
10702 MachineBasicBlock *PeeledSwitchMBB =
10703 FuncInfo.MF->CreateMachineBasicBlock(SwitchMBB->getBasicBlock());
10704 FuncInfo.MF->insert(BBI, PeeledSwitchMBB);
10705
10706 ExportFromCurrentBlock(SI.getCondition());
10707 auto PeeledCaseIt = Clusters.begin() + PeeledCaseIndex;
10708 SwitchWorkListItem W = {SwitchMBB, PeeledCaseIt, PeeledCaseIt,
10709 nullptr, nullptr, TopCaseProb.getCompl()};
10710 lowerWorkItem(W, SI.getCondition(), SwitchMBB, PeeledSwitchMBB);
10711
10712 Clusters.erase(PeeledCaseIt);
10713 for (CaseCluster &CC : Clusters) {
10714 LLVM_DEBUG(
10715 dbgs() << "Scale the probablity for one cluster, before scaling: "
10716 << CC.Prob << "\n");
10717 CC.Prob = scaleCaseProbality(CC.Prob, TopCaseProb);
10718 LLVM_DEBUG(dbgs() << "After scaling: " << CC.Prob << "\n");
10719 }
10720 PeeledCaseProb = TopCaseProb;
10721 return PeeledSwitchMBB;
10722}
10723
10724void SelectionDAGBuilder::visitSwitch(const SwitchInst &SI) {
10725 // Extract cases from the switch.
10726 BranchProbabilityInfo *BPI = FuncInfo.BPI;
10727 CaseClusterVector Clusters;
10728 Clusters.reserve(SI.getNumCases());
10729 for (auto I : SI.cases()) {
10730 MachineBasicBlock *Succ = FuncInfo.MBBMap[I.getCaseSuccessor()];
10731 const ConstantInt *CaseVal = I.getCaseValue();
10732 BranchProbability Prob =
10733 BPI ? BPI->getEdgeProbability(SI.getParent(), I.getSuccessorIndex())
10734 : BranchProbability(1, SI.getNumCases() + 1);
10735 Clusters.push_back(CaseCluster::range(CaseVal, CaseVal, Succ, Prob));
10736 }
10737
10738 MachineBasicBlock *DefaultMBB = FuncInfo.MBBMap[SI.getDefaultDest()];
10739
10740 // Cluster adjacent cases with the same destination. We do this at all
10741 // optimization levels because it's cheap to do and will make codegen faster
10742 // if there are many clusters.
10743 sortAndRangeify(Clusters);
10744
10745 // The branch probablity of the peeled case.
10746 BranchProbability PeeledCaseProb = BranchProbability::getZero();
10747 MachineBasicBlock *PeeledSwitchMBB =
10748 peelDominantCaseCluster(SI, Clusters, PeeledCaseProb);
10749
10750 // If there is only the default destination, jump there directly.
10751 MachineBasicBlock *SwitchMBB = FuncInfo.MBB;
10752 if (Clusters.empty()) {
10753 assert(PeeledSwitchMBB == SwitchMBB);
10754 SwitchMBB->addSuccessor(DefaultMBB);
10755 if (DefaultMBB != NextBlock(SwitchMBB)) {
10756 DAG.setRoot(DAG.getNode(ISD::BR, getCurSDLoc(), MVT::Other,
10757 getControlRoot(), DAG.getBasicBlock(DefaultMBB)));
10758 }
10759 return;
10760 }
10761
10762 SL->findJumpTables(Clusters, &SI, DefaultMBB, DAG.getPSI(), DAG.getBFI());
10763 SL->findBitTestClusters(Clusters, &SI);
10764
10765 LLVM_DEBUG({
10766 dbgs() << "Case clusters: ";
10767 for (const CaseCluster &C : Clusters) {
10768 if (C.Kind == CC_JumpTable)
10769 dbgs() << "JT:";
10770 if (C.Kind == CC_BitTests)
10771 dbgs() << "BT:";
10772
10773 C.Low->getValue().print(dbgs(), true);
10774 if (C.Low != C.High) {
10775 dbgs() << '-';
10776 C.High->getValue().print(dbgs(), true);
10777 }
10778 dbgs() << ' ';
10779 }
10780 dbgs() << '\n';
10781 });
10782
10783 assert(!Clusters.empty());
10784 SwitchWorkList WorkList;
10785 CaseClusterIt First = Clusters.begin();
10786 CaseClusterIt Last = Clusters.end() - 1;
10787 auto DefaultProb = getEdgeProbability(PeeledSwitchMBB, DefaultMBB);
10788 // Scale the branchprobability for DefaultMBB if the peel occurs and
10789 // DefaultMBB is not replaced.
10790 if (PeeledCaseProb != BranchProbability::getZero() &&
10791 DefaultMBB == FuncInfo.MBBMap[SI.getDefaultDest()])
10792 DefaultProb = scaleCaseProbality(DefaultProb, PeeledCaseProb);
10793 WorkList.push_back(
10794 {PeeledSwitchMBB, First, Last, nullptr, nullptr, DefaultProb});
10795
10796 while (!WorkList.empty()) {
10797 SwitchWorkListItem W = WorkList.pop_back_val();
10798 unsigned NumClusters = W.LastCluster - W.FirstCluster + 1;
10799
10800 if (NumClusters > 3 && TM.getOptLevel() != CodeGenOpt::None &&
10801 !DefaultMBB->getParent()->getFunction().hasMinSize()) {
10802 // For optimized builds, lower large range as a balanced binary tree.
10803 splitWorkItem(WorkList, W, SI.getCondition(), SwitchMBB);
10804 continue;
10805 }
10806
10807 lowerWorkItem(W, SI.getCondition(), SwitchMBB, DefaultMBB);
10808 }
10809}
10810
10811void SelectionDAGBuilder::visitFreeze(const FreezeInst &I) {
10812 SmallVector<EVT, 4> ValueVTs;
10813 ComputeValueVTs(DAG.getTargetLoweringInfo(), DAG.getDataLayout(), I.getType(),
10814 ValueVTs);
10815 unsigned NumValues = ValueVTs.size();
10816 if (NumValues == 0) return;
10817
10818 SmallVector<SDValue, 4> Values(NumValues);
10819 SDValue Op = getValue(I.getOperand(0));
10820
10821 for (unsigned i = 0; i != NumValues; ++i)
10822 Values[i] = DAG.getNode(ISD::FREEZE, getCurSDLoc(), ValueVTs[i],
10823 SDValue(Op.getNode(), Op.getResNo() + i));
10824
10825 setValue(&I, DAG.getNode(ISD::MERGE_VALUES, getCurSDLoc(),
10826 DAG.getVTList(ValueVTs), Values));
10827}
10828